diff --git a/.gitignore b/.gitignore index 800b2944..fdcbad43 100644 --- a/.gitignore +++ b/.gitignore @@ -26,7 +26,8 @@ makeQt # Ignore unpacked sources in the main directory (match pkg-ver) /*[-_][0-9]* -# Include scotch +# Include decomposition library sources !/scotch_6.0.9 +!/Zoltan-3.90 # End-of-file diff --git a/Zoltan-3.90/CMakeLists.txt b/Zoltan-3.90/CMakeLists.txt new file mode 100644 index 00000000..717d98d7 --- /dev/null +++ b/Zoltan-3.90/CMakeLists.txt @@ -0,0 +1,133 @@ + + +# +# A) Define the package +# + +TRIBITS_PACKAGE(Zoltan) + +# +# B) Set up package-specific options +# + +TRIBITS_ADD_OPTION_AND_DEFINE( ${PACKAGE_NAME}_ENABLE_UINT_IDS + UNSIGNED_INT_GLOBAL_IDS + "typedef unsigned int ZOLTAN_ID_TYPE" + OFF ) + +TRIBITS_ADD_OPTION_AND_DEFINE( ${PACKAGE_NAME}_ENABLE_ULONG_IDS + UNSIGNED_LONG_GLOBAL_IDS + "typedef unsigned long ZOLTAN_ID_TYPE" + OFF ) + +TRIBITS_ADD_OPTION_AND_DEFINE( ${PACKAGE_NAME}_ENABLE_ULLONG_IDS + UNSIGNED_LONG_LONG_GLOBAL_IDS + "typedef unsigned long long ZOLTAN_ID_TYPE" + OFF ) + +TRIBITS_ADD_OPTION_AND_DEFINE(${PACKAGE_NAME}_ENABLE_HUND + HAVE_ZOLTAN_HUND + "Enable support for HUND in ${PACKAGE_NAME}." + OFF ) + +TRIBITS_ADD_OPTION_AND_DEFINE(${PACKAGE_NAME}_ENABLE_KNUTH_HASH + HAVE_ZOLTAN_KNUTH_HASH + "Enable support for Knuth's hash function in ${PACKAGE_NAME}." + OFF ) + +TRIBITS_ADD_OPTION_AND_DEFINE(${PACKAGE_NAME}_ENABLE_F90INTERFACE + BUILD_ZOLTAN_F90_INTERFACE + "Enable build of F90 interface to ${PACKAGE_NAME}." + OFF ) + +TRIBITS_ADD_OPTION_AND_DEFINE(${PACKAGE_NAME}_ENABLE_PURIFY + HAVE_PURIFY + "Enable work-arounds to purify bugs." + OFF ) + +OPTION(${PACKAGE_NAME}_ENABLE_CPPDRIVER + "Enable C++ driver for ${PACKAGE_NAME}." + ${${PROJECT_NAME}_ENABLE_CXX} ) + +IF (${PROJECT_NAME}_ENABLE_CXX) + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMPICH_IGNORE_CXX_SEEK") +ENDIF() + +IF(${PROJECT_NAME}_ENABLE_Fortran AND BUILD_ZOLTAN_F90_INTERFACE) + + # Special Fortran compiler-specific flags + IF (${CMAKE_Fortran_COMPILER_ID} MATCHES "PGI") + SET(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -DPGI") + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DPGI") + ENDIF() + + IF (${CMAKE_Fortran_COMPILER_ID} MATCHES "LAHEY") + SET(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -DFUJITSU") + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DFUJITSU") + ENDIF() + + SET(ZOLTAN_BUILD_ZFDRIVE YES) +ENDIF() + +# +# C) Add the libraries, tests, and examples +# + +IF (NOT TPL_ENABLE_MPI) + ADD_SUBDIRECTORY(siMPI) + SET(ZOLTAN_BUILD_ZFDRIVE NO) +ENDIF() + +ADD_SUBDIRECTORY(src) + +TRIBITS_ADD_TEST_DIRECTORIES(src/driver) + +IF(ZOLTAN_BUILD_ZFDRIVE) + # CMAKE version > 2.6.4 is needed; earlier versions use C++ compiler to link + # fdriver, while fdriver needs F90. 
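# (Editorial sketch, not part of the upstream CMakeLists.txt: ZOLTAN_BUILD_ZFDRIVE
#  is only set when Fortran is enabled for the enclosing project and the F90
#  interface option above was requested at configure time, and it is cleared again
#  when MPI is not enabled. Assuming the cache names follow the
#  TRIBITS_ADD_OPTION_AND_DEFINE calls with PACKAGE_NAME "Zoltan", a configure line
#  that would activate the Fortran driver tests registered just below might look like
#      cmake -D Zoltan_ENABLE_F90INTERFACE:BOOL=ON <path-to-source>
#  the exact option spelling is an assumption, not taken from upstream documentation.)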
+ TRIBITS_ADD_TEST_DIRECTORIES(src/fdriver) +ENDIF() + +TRIBITS_ADD_TEST_DIRECTORIES(test) + +TRIBITS_ADD_EXAMPLE_DIRECTORIES(example) + +TRIBITS_EXCLUDE_FILES( + test/ch_brack2_3 + test/ch_bug + test/ch_degenerate + test/ch_degenerateAA + test/ch_drake + test/ch_grid20x19 + test/ch_hammond + test/ch_hammond2 + test/ch_hughes + test/ch_nograph + test/ch_onedbug + test/ch_random + test/ch_serial + test/ch_slac + test/hg_cage10 + test/hg_diag500_4 + test/hg_ewgt + test/hg_felix + test/hg_ibm03 + test/hg_ml27 + test/hg_nograph + test/hg_vwgt + test/nem_ti_20k + test/nem_ti_4k + test/misc_siefert + test/th + test/bin + doc/Zoltan_html/tu_html + src/ZoltanComponent + src/driver_old + src/fdriver_old + ) + +# +# D) Do standard postprocessing +# + +TRIBITS_PACKAGE_POSTPROCESS() diff --git a/Zoltan-3.90/COPYRIGHT_AND_LICENSE b/Zoltan-3.90/COPYRIGHT_AND_LICENSE new file mode 100644 index 00000000..0af71a43 --- /dev/null +++ b/Zoltan-3.90/COPYRIGHT_AND_LICENSE @@ -0,0 +1,45 @@ +/* + * @HEADER + * + * *********************************************************************** + * + * Zoltan Toolkit for Load-balancing, Partitioning, Ordering and Coloring + * Copyright 2012 Sandia Corporation + * + * Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, + * the U.S. Government retains certain rights in this software. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the Corporation nor the names of the + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Questions? 
Contact Karen Devine kddevin@sandia.gov + * Erik Boman egboman@sandia.gov + * + * *********************************************************************** + * + * @HEADER + */ diff --git a/Zoltan-3.90/Disclaimer b/Zoltan-3.90/Disclaimer new file mode 100644 index 00000000..647ed961 --- /dev/null +++ b/Zoltan-3.90/Disclaimer @@ -0,0 +1,17 @@ + +Zoltan Library for Parallel Applications + +Neither Sandia, the government, the DOE, nor any of their employees, makes +any warranty, express or implied, or assumes any legal liability or +responsibility for the accuracy, completeness, or usefulness of any +information, apparatus, product, or process disclosed, or represents that +its use would not infringe privately owned rights. This information is made +available on an "AS-IS" basis. +ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +EXCLUDED HEREUNDER. +Neither Sandia nor the Government, nor their agents, officers and employees +shall be liable for any loss, damage (including, incidental, consequential +and special), injury or other casualty of whatsoever kind, or by whomsoever +caused, to the person or property of anyone arising out of or resulting from +this information, or the accuracy and validity of this information. diff --git a/Zoltan-3.90/Known_Problems b/Zoltan-3.90/Known_Problems new file mode 100644 index 00000000..925cb370 --- /dev/null +++ b/Zoltan-3.90/Known_Problems @@ -0,0 +1,251 @@ +Problems existing in Zoltan. +This file was last updated on $Date$ + +------------------------------------------------------------------------------- +ERROR CONDITIONS IN ZOLTAN +When a processor returns from Zoltan to the application due to an error +condition, other processors do not necessarily return the same condition. +In fact, other processors may not know that the processor has quit Zoltan, +and may hang in a communication (waiting for a message that is not sent +due to the error condition). The parallel error-handling capabilities of +Zoltan will be improved in future releases. +------------------------------------------------------------------------------- +RCB/RIB ON ASCI RED +On ASCI Red, the number of context IDs (e.g., MPI Communicators) is limited +to 8192. The environment variable MPI_REUSE_CONTEXT_IDS must be set to +reuse the IDs; setting this variable, however, slows performance. +An alternative is to set Zoltan_Parameter TFLOPS_SPECIAL to "1". With +TFLOPS_SPECIAL set, communicators in RCB/RIB are not split and, thus, the +application is less likely to run out of context IDs. However, ASCI Red +also has a bug that is exposed by TFLOPS_SPECIAL; when messages that use +MPI_Send/MPI_Recv within RCB/RIB exceed the MPI_SHORT_MSG_SIZE, MPI_Recv +hangs. We do not expect these conditions to exist on future platforms and, +indeed, plan to make TFLOPS_SPECIAL obsolete in future versions of Zoltan +rather than re-work it with MPI_Irecv. -- KDD 10/5/2004 +------------------------------------------------------------------------------- +ERROR CONDITIONS IN OCTREE, PARMETIS AND JOSTLE +On failure, OCTREE, ParMETIS and Jostle methods abort rather than return +error codes. +------------------------------------------------------------------------------- +ZOLTAN_INITIALIZE BUT NO ZOLTAN_FINALIZE +If Zoltan_Initialize calls MPI_Init, then MPI_Finalize +will never be called because there is no Zoltan_Finalize routine. 
+If the application uses MPI and calls MPI_Init and MPI_Finalize, +then there is no problem. +------------------------------------------------------------------------------- +HETEROGENEOUS ENVIRONMENTS +Some parts of Zoltan currently assume that basic data types like +integers and real numbers (floats) have identical representation +on all processors. This may not be true in a heterogeneous +environment. Specifically, the unstructured (irregular) communication +library is unsafe in a heterogeneous environment. This problem +will be corrected in a future release of Zoltan for heterogeneous +systems. +------------------------------------------------------------------------------- +F90 ISSUES +Pacific Sierra Research (PSR) Vastf90 is not currently supported due to bugs +in the compiler with no known workarounds. It is not known when or if this +compiler will be supported. + +N.A.Software FortranPlus is not currently supported due to problems with the +query functions. We anticipate that this problem can be overcome, and support +will be added soon. +------------------------------------------------------------------------------- +PROBLEMS EXISTING IN PARMETIS +(Reported to the ParMETIS development team at the University of Minnesota, + metis@cs.umn.edu) + +Name: Free-memory write in PartGeomKway +Version: ParMETIS 3.1.1 +Symptom: Free-memory write reported by Purify and Valgrind for graphs with + no edges. +Description: + For input graphs with no (or, perhaps, few) edges, Purify and Valgrind + report writes to already freed memory as shown below. +FMW: Free memory write: + * This is occurring while in thread 22199: + SetUp(void) [setup.c:80] + PartitionSmallGraph(void) [weird.c:39] + ParMETIS_V3_PartGeomKway [gkmetis.c:214] + Zoltan_ParMetis [parmetis_interface.c:280] + Zoltan_LB [lb_balance.c:384] + Zoltan_LB_Partition [lb_balance.c:91] + run_zoltan [dr_loadbal.c:581] + main [dr_main.c:386] + __libc_start_main [libc.so.6] + _start [crt1.o] + * Writing 4 bytes to 0xfcd298 in the heap. + * Address 0xfcd298 is at the beginning of a freed block of 4 bytes. + * This block was allocated from thread -1781075296: + malloc [rtlib.o] + GKmalloc(void) [util.c:151] + idxmalloc(void) [util.c:100] + AllocateWSpace [memory.c:28] + ParMETIS_V3_PartGeomKway [gkmetis.c:123] + Zoltan_ParMetis [parmetis_interface.c:280] + Zoltan_LB [lb_balance.c:384] + Zoltan_LB_Partition [lb_balance.c:91] + run_zoltan [dr_loadbal.c:581] + main [dr_main.c:386] + __libc_start_main [libc.so.6] + _start [crt1.o] + * There have been 10 frees since this block was freed from thread 22199: + GKfree(void) [util.c:168] + Mc_MoveGraph(void) [move.c:92] + ParMETIS_V3_PartGeomKway [gkmetis.c:149] + Zoltan_ParMetis [parmetis_interface.c:280] + Zoltan_LB [lb_balance.c:384] + Zoltan_LB_Partition [lb_balance.c:91] + run_zoltan [dr_loadbal.c:581] + main [dr_main.c:386] + __libc_start_main [libc.so.6] + _start [crt1.o] +Reported: Reported 8/31/09 http://glaros.dtc.umn.edu/flyspray/task/50 +Status: Reported 8/31/09 + +Name: PartGeom limitation +Version: ParMETIS 3.0, 3.1 +Symptom: inaccurate number of partitions when # partitions != # processors +Description: + ParMETIS method PartGeom produces decompositions with #-processor + partitions only. Zoltan parameters NUM_GLOBAL_PARTITIONS and + NUM_LOCAL_PARTITIONS will be ignored. +Reported: Not yet reported. +Status: Not yet reported. + +Name: vsize array freed in ParMetis +Version: ParMETIS 3.0 and 3.1 +Symptom: seg. 
fault, core dump at runtime +Description: + When calling ParMETIS_V3_AdaptiveRepart with the vsize parameter, + ParMetis will try to free the vsize array even if it was + allocated in Zoltan. Zoltan will then try to free vsize again + later, resulting in a fatal error. As a temporary fix, + Zoltan will never call ParMetis with the vsize parameter. +Reported: 11/25/2003. +Status: Acknowledged by George Karypis. + +Name: ParMETIS_V3_AdaptiveRepart and ParMETIS_V3_PartKWay crash + for zero-sized partitions. +Version: ParMETIS 3.1 +Symptom: run-time error "killed by signal 8" on DEC. FPE, divide-by-zero. +Description: + Metis divides by partition size; thus, zero-sized partitions + cause a floating-point exception. +Reported: 9/9/2003. +Status: ? + +Name: ParMETIS_V3_AdaptiveRepart dies for zero-sized partitions. +Version: ParMETIS 3.0 +Symptom: run-time error "killed by signal 8" on DEC. FPE, divide-by-zero. +Description: + ParMETIS_V3_AdaptiveRepart divides by partition size; thus, zero-sized + partitions cause a floating-point exception. This problem is exhibited in + adaptive-partlocal3 tests. The tests actually run on Sun and Linux machines + (which don't seem to care about the divide-by-zero), but cause an FPE + signal on DEC (Compaq) machines. +Reported: 1/23/2003. +Status: Fixed in ParMetis 3.1, but new problem appeared (see above). + +Name: ParMETIS_V3_AdaptiveRepart crashes when no edges. +Version: ParMETIS 3.0 +Symptom: Floating point exception, divide-by-zero. +Description: + Divide-by-zero in ParMETISLib/adrivers.c, function Adaptive_Partition, + line 40. +Reported: 1/23/2003. +Status: Fixed in ParMetis 3.1. + +Name: Uninitialized memory read in akwayfm.c. +Version: ParMETIS 3.0 +Symptom: UMR warning. +Description: + UMR in ParMETISLib/akwayfm.c, function Moc_KWayAdaptiveRefine, near line 520. +Reported: 1/23/2003. +Status: Fixed in ParMetis 3.1. + +Name: Memory leak in wave.c +Version: ParMETIS 3.0 +Symptom: Some memory not freed. +Description: + Memory leak in ParMETISLib/wave.c, function WavefrontDiffusion; + memory for the following variables is not always freed: + solution, perm, workspace, cand + We believe the early return near line 111 causes the problem. +Reported: 1/23/2003. +Status: Fixed in ParMetis 3.1. + +Name: tpwgts ignored for small graphs. +Version: ParMETIS 3.0 +Symptom: incorrect output (partitioning) +Description: + When using ParMETIS_V3_PartKway to partition into partitions + of unequal sizes, the input array tpwgts is ignored and + uniform-sized partitions are computed. This bug shows up when + (a) the number of vertices is < 10000 and (b) only one weight + per vertex is given (ncon=1). +Reported: Reported to George Karypis and metis@cs.umn.edu on 2002/10/30. +Status: Fixed in ParMetis 3.1. + + +Name: AdaptiveRepart crashes on partless test. +Version: ParMETIS 3.0 +Symptom: run-time segmentation violation. +Description: + ParMETIS_V3_AdaptiveRepart crashes with a SIGSEGV if + the input array _part_ contains any value greater then + the desired number of partitions, nparts. This shows up + in Zoltan's "partless" test cases. +Reported: Reported to George Karypis and metis@cs.umn.edu on 2002/12/02. +Status: Fixed in ParMetis 3.1. + + +Name: load imbalance tolerance +Version: ParMETIS 2.0 +Symptom: missing feature +Description: + The load imbalance parameter UNBALANCE_FRACTION can + only be set at compile-time. With Zoltan it is + necessary to be able to set this parameter at run-time. +Reported: Reported to metis@cs.umn.edu on 19 Aug 1999. 
+Status: Fixed in version 3.0. + + +Name: no edges +Version: ParMETIS 2.0 +Symptom: segmentation fault at run time +Description: + ParMETIS crashes if the input graph has no edges and + ParMETIS_PartKway is called. We suspect all the graph based + methods crash. From the documentation it is unclear if + a NULL pointer is a valid input for the adjncy array. + Apparently, the bug occurs both with NULL as input or + a valid pointer to an array. +Reported: Reported to metis@cs.umn.edu on 5 Oct 1999. +Status: Fixed in version 3.0. + + +Name: no vertices +Version: ParMETIS 2.0, 3.0, 3.1 +Symptom: segmentation fault at run time +Description: + ParMETIS may crash if a processor owns no vertices. + The extent of this bug is not known (which methods are affected). + Again, it is unclear if NULL pointers are valid input. +Reported: Reported to metis@cs.umn.edu on 6 Oct 1999. +Status: Fixed in 3.0 and 3.1 for the graph methods, but not the geometric methods. + New bug report sent on 2003/08/20. + + +Name: partgeom bug +Version: ParMETIS 2.0 +Symptom: floating point exception +Description: + For domains where the global delta_x, delta_y, or delta_z (in 3D) + is zero (e.g., all nodes lie along the y-axis), a floating point + exception can occur when the partgeom algorithm is used. +Reported: kirk@cs.umn.edu in Jan 2001. +Status: Fixed in version 3.0. + +------------------------------------------------------------------------------- + diff --git a/Zoltan-3.90/Makefile.am b/Zoltan-3.90/Makefile.am new file mode 100644 index 00000000..77d47b6e --- /dev/null +++ b/Zoltan-3.90/Makefile.am @@ -0,0 +1,370 @@ +# @HEADER +# +######################################################################## +# +# Zoltan Toolkit for Load-balancing, Partitioning, Ordering and Coloring +# Copyright 2012 Sandia Corporation +# +# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, +# the U.S. Government retains certain rights in this software. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# 3. Neither the name of the Corporation nor the names of the +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Questions? 
Contact Karen Devine kddevin@sandia.gov +# Erik Boman egboman@sandia.gov +# +######################################################################## +# +# @HEADER + + +## ####################################################################### +## Options to automake (rarely used - don't worry about it) +## ####################################################################### +AUTOMAKE_OPTIONS = foreign + +## ####################################################################### +## Aclocal command (rarely used - don't worry about it) +## ####################################################################### +ACLOCAL_AMFLAGS = -I config + +## ####################################################################### +## Additional files to be included in distribution for 'make dist' +## ####################################################################### +ZOLTAN_TESTS = \ + test/test_zoltan \ + test/runtests \ + test/ctest_zoltan.pl \ + test/ch_simple \ + test/hg_simple + +ZOLTAN_DOCS = \ + doc/Zoltan_html/Zoltan.html \ + doc/Zoltan_html/Zoltan_FAQ.html \ + doc/Zoltan_html/Zoltan_bugreport.html \ + doc/Zoltan_html/Zoltan_cite.html \ + doc/Zoltan_html/Zoltan_construction.html \ + doc/Zoltan_html/Zoltan_phil.html \ + doc/Zoltan_html/Zoltan_pubs.html \ + doc/Zoltan_html/dev_html/brack3d.png \ + doc/Zoltan_html/dev_html/dev.html \ + doc/Zoltan_html/dev_html/dev_add.html \ + doc/Zoltan_html/dev_html/dev_add_interface.html \ + doc/Zoltan_html/dev_html/dev_add_lb.html \ + doc/Zoltan_html/dev_html/dev_add_memory.html \ + doc/Zoltan_html/dev_html/dev_add_params.html \ + doc/Zoltan_html/dev_html/dev_add_remap.html \ + doc/Zoltan_html/dev_html/dev_add_struct.html \ + doc/Zoltan_html/dev_html/dev_cpp.html \ + doc/Zoltan_html/dev_html/dev_degenerate.html \ + doc/Zoltan_html/dev_html/dev_dist.html \ + doc/Zoltan_html/dev_html/dev_dist_compile.html \ + doc/Zoltan_html/dev_html/dev_dist_cvs.html \ + doc/Zoltan_html/dev_html/dev_dist_dir.html \ + doc/Zoltan_html/dev_html/dev_driver.html \ + doc/Zoltan_html/dev_html/dev_fortran.html \ + doc/Zoltan_html/dev_html/dev_hier.html \ + doc/Zoltan_html/dev_html/dev_hsfc.html \ + doc/Zoltan_html/dev_html/dev_intro.html \ + doc/Zoltan_html/dev_html/dev_intro_coding.html \ + doc/Zoltan_html/dev_html/dev_intro_philosophy.html \ + doc/Zoltan_html/dev_html/dev_intro_sqe.html \ + doc/Zoltan_html/dev_html/dev_lb.html \ + doc/Zoltan_html/dev_html/dev_lb_interface.html \ + doc/Zoltan_html/dev_html/dev_lb_structs.html \ + doc/Zoltan_html/dev_html/dev_lb_types.html \ + doc/Zoltan_html/dev_html/dev_mig.html \ + doc/Zoltan_html/dev_html/dev_parmetis.html \ + doc/Zoltan_html/dev_html/dev_phg.html \ + doc/Zoltan_html/dev_html/dev_rcb.html \ + doc/Zoltan_html/dev_html/dev_refs.html \ + doc/Zoltan_html/dev_html/dev_reftree.html \ + doc/Zoltan_html/dev_html/dev_rib.html \ + doc/Zoltan_html/dev_html/dev_services.html \ + doc/Zoltan_html/dev_html/dev_services_debug.html \ + doc/Zoltan_html/dev_html/dev_services_hash.html \ + doc/Zoltan_html/dev_html/dev_services_objlist.html \ + doc/Zoltan_html/dev_html/dev_services_parallel.html \ + doc/Zoltan_html/dev_html/dev_services_params.html \ + doc/Zoltan_html/dev_html/dev_services_time.html \ + doc/Zoltan_html/dev_html/dev_services_zoltantimer.html \ + doc/Zoltan_html/dev_html/dev_test_script.html \ + doc/Zoltan_html/dev_html/dev_view.html \ + doc/Zoltan_html/dev_html/film2d.png \ + doc/Zoltan_html/dev_html/hammondMesh.png \ + doc/Zoltan_html/dev_html/hammondPoints.png \ + doc/Zoltan_html/dev_html/zdrive.inp \ + 
doc/Zoltan_html/ug_html/figures/arrow.gif \ + doc/Zoltan_html/ug_html/figures/HGFigure.gif \ + doc/Zoltan_html/ug_html/figures/hierexample.fig \ + doc/Zoltan_html/ug_html/figures/hierexample.gif \ + doc/Zoltan_html/ug_html/figures/Z.gif \ + doc/Zoltan_html/ug_html/ug.html \ + doc/Zoltan_html/ug_html/ug_alg.html \ + doc/Zoltan_html/ug_html/ug_alg_block.html \ + doc/Zoltan_html/ug_html/ug_alg_geom.html \ + doc/Zoltan_html/ug_html/ug_alg_graph.html \ + doc/Zoltan_html/ug_html/ug_alg_hier.html \ + doc/Zoltan_html/ug_html/ug_alg_hsfc.html \ + doc/Zoltan_html/ug_html/ug_alg_hypergraph.html \ + doc/Zoltan_html/ug_html/ug_alg_jostle.html \ + doc/Zoltan_html/ug_html/ug_alg_oct.html \ + doc/Zoltan_html/ug_html/ug_alg_parkway.html \ + doc/Zoltan_html/ug_html/ug_alg_parmetis.html \ + doc/Zoltan_html/ug_html/ug_alg_patoh.html \ + doc/Zoltan_html/ug_html/ug_alg_phg.html \ + doc/Zoltan_html/ug_html/ug_alg_ptscotch.html \ + doc/Zoltan_html/ug_html/ug_alg_random.html \ + doc/Zoltan_html/ug_html/ug_alg_rcb.html \ + doc/Zoltan_html/ug_html/ug_alg_reftree.html \ + doc/Zoltan_html/ug_html/ug_alg_rib.html \ + doc/Zoltan_html/ug_html/ug_alg_simple.html \ + doc/Zoltan_html/ug_html/ug_backward.html \ + doc/Zoltan_html/ug_html/ug_color.html \ + doc/Zoltan_html/ug_html/ug_color_parallel.html \ + doc/Zoltan_html/ug_html/ug_cpp.html \ + doc/Zoltan_html/ug_html/ug_examples.html \ + doc/Zoltan_html/ug_html/ug_examples_init.html \ + doc/Zoltan_html/ug_html/ug_examples_lb.html \ + doc/Zoltan_html/ug_html/ug_examples_mig.html \ + doc/Zoltan_html/ug_html/ug_examples_query.html \ + doc/Zoltan_html/ug_html/ug_fortran.html \ + doc/Zoltan_html/ug_html/ug_graph_vs_hg.html \ + doc/Zoltan_html/ug_html/ug_index.html \ + doc/Zoltan_html/ug_html/ug_interface.html \ + doc/Zoltan_html/ug_html/ug_interface_augment.html \ + doc/Zoltan_html/ug_html/ug_interface_color.html \ + doc/Zoltan_html/ug_html/ug_interface_init.html \ + doc/Zoltan_html/ug_html/ug_interface_lb.html \ + doc/Zoltan_html/ug_html/ug_interface_mig.html \ + doc/Zoltan_html/ug_html/ug_interface_order.html \ + doc/Zoltan_html/ug_html/ug_intro.html \ + doc/Zoltan_html/ug_html/ug_order.html \ + doc/Zoltan_html/ug_html/ug_order_parmetis.html \ + doc/Zoltan_html/ug_html/ug_order_ptscotch.html \ + doc/Zoltan_html/ug_html/ug_param.html \ + doc/Zoltan_html/ug_html/ug_query.html \ + doc/Zoltan_html/ug_html/ug_query_lb.html \ + doc/Zoltan_html/ug_html/ug_query_mig.html \ + doc/Zoltan_html/ug_html/ug_refs.html \ + doc/Zoltan_html/ug_html/ug_release.html \ + doc/Zoltan_html/ug_html/ug_usage.html \ + doc/Zoltan_html/ug_html/ug_util.html \ + doc/Zoltan_html/ug_html/ug_util_comm.html \ + doc/Zoltan_html/ug_html/ug_util_dd.html \ + doc/Zoltan_html/ug_html/ug_util_mem.html + +ZOLTAN_SAMPLE = \ + SampleConfigurationScripts/mac_osX_no_fortran \ + SampleConfigurationScripts/linux_with_purify_zoltan_only \ + SampleConfigurationScripts/mac_osX_zoltan_only \ + SampleConfigurationScripts/linux_zoltan_only \ + SampleConfigurationScripts/linux_trilinos_runtests \ + SampleConfigurationScripts/linux_zoltan_dist \ + SampleConfigurationScripts/mac_osX_zoltan_dist + + +ZOLTAN_CMAKE = \ + CMakeLists.txt \ + src/CMakeLists.txt \ + src/driver/CMakeLists.txt \ + src/fdriver/CMakeLists.txt \ + test/CMakeLists.txt \ + test/ch_simple/CMakeLists.txt \ + test/hg_simple/CMakeLists.txt \ + cmake/Dependencies.cmake \ + cmake/Zoltan_config.h.in + +EXTRA_DIST = config/generate-makeoptions.pl \ + README.html README.txt \ + config/strip_dup_incl_paths.pl config/strip_dup_libs.pl \ + config/replace-install-prefix.pl 
config/string-replace.pl \ + config/token-replace.pl \ + Disclaimer GNU_Lesser_GPL.txt Known_Problems VERSION \ + $(ZOLTAN_DOCS) $(ZOLTAN_SAMPLE) $(ZOLTAN_TESTS) + + +## ####################################################################### +## Tools in the auxillary directory +## ####################################################################### +AUX_DIST = config/install-sh config/missing config/mkinstalldirs + +## ####################################################################### +## Files to be deleted by 'make maintainer-clean' +## ####################################################################### +MAINTAINERCLEANFILES = Makefile.in aclocal.m4 autom4te.cache/* \ + configure config.status config.log \ + src/common/config-h.in src/common/stamp-h.in \ + $(AUX_DIST) + +#The following line helps the test harness recover from build errors. + +all-local: + @echo "" + @echo "Trilinos package zoltan built successfully." + @echo "" + +## ####################################################################### +## Subdirectories to be make'd recursively +## ####################################################################### + +#We now build tests and examples through separate make targets, rather than +#during "make". We still need to conditionally include the test and example +#in SUBDIRS, even though BUILD_TESTS and BUILD_EXAMPLES will never be +#defined, so that the tests and examples are included in the distribution +#tarball. + +#Add this later +if SUB_TEST +TEST_SUBDIR= +endif + +if SUB_EXAMPLE +EXAMPLE_SUBDIR=example +endif + +if BUILD_TESTS +tests: examples + @echo "" + @echo "Now building zoltan tests." + @echo "" + @echo "Zoltan tests use drivers and input files; no compilation necessary." + @echo "" + @echo "Finished building zoltan tests." + @echo "" +else +tests: + @echo "zoltan tests were disabled at configure time" +endif + +if BUILD_EXAMPLES +examples: + @echo "" + @echo "Now building zoltan examples." + @echo "" + cd $(top_builddir)/example && $(MAKE) + @echo "" + @echo "Finished building zoltan examples." + @echo "" + +install-examples: + cd $(top_builddir)/example && $(MAKE) install +else +examples: + @echo "zoltan examples were disabled at configure time" + +install-examples: + @echo "zoltan examples were disabled at configure time" +endif + +clean-tests: + cd $(top_builddir)/test && $(MAKE) clean + +clean-examples: + cd $(top_builddir)/example && $(MAKE) clean + +everything: + $(MAKE) && $(MAKE) examples && $(MAKE) tests + +clean-everything: + $(MAKE) clean-examples && $(MAKE) clean-tests && $(MAKE) clean + +install-everything: + $(MAKE) install && $(MAKE) install-examples + +if HAVE_MPI +SIMPI_SUBDIR= +else +SIMPI_SUBDIR=siMPI +endif + +SUBDIRS = $(SIMPI_SUBDIR) src $(EXAMPLE_SUBDIR) $(TEST_SUBDIR) + +## ####################################################################### +## The below targets allow you to use the new +## testharness to run the test suite as make targets +## ####################################################################### + +TRILINOS_HOME_DIR=@abs_top_srcdir@/../.. +TRILINOS_BUILD_DIR=@abs_top_builddir@/../.. 
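# (Editorial note, not part of the upstream Makefile.am: the ../.. paths above
#  assume Zoltan sits in its usual location under Trilinos/packages/zoltan. These
#  are ordinary make variables, so the runtests targets defined further down can be
#  driven with one-off overrides instead of editing this file, e.g.
#      make runtests-mpi TRILINOS_MPI_MAX_PROC=8
#  where the value 8 is just an illustrative process count.)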
+TRILINOS_MPI_MAX_PROC=4 +TRILINOS_TEST_CATEGORY=INSTALL + +runtests-serial : + $(PERL_EXE) $(TRILINOS_HOME_DIR)/commonTools/test/utilities/runtests \ + --trilinos-dir=$(TRILINOS_HOME_DIR) \ + --comm=serial \ + --build-dir=$(TRILINOS_BUILD_DIR) \ + --category=$(TRILINOS_TEST_CATEGORY) \ + --output-dir=@abs_top_builddir@/test/runtests-results \ + --verbosity=1 \ + --packages=zoltan + +runtests-mpi : + $(PERL_EXE) $(TRILINOS_HOME_DIR)/commonTools/test/utilities/runtests \ + --trilinos-dir=$(TRILINOS_HOME_DIR) \ + --comm=mpi \ + --mpi-go=$(TRILINOS_MPI_GO) \ + --build-dir=$(TRILINOS_BUILD_DIR) \ + --max-proc=$(TRILINOS_MPI_MAX_PROC) \ + --category=$(TRILINOS_TEST_CATEGORY) \ + --output-dir=@abs_top_builddir@/test/runtests-results \ + --verbosity=1 \ + --packages=zoltan + +## ####################################################################### +## Export makefile stuff +## ####################################################################### + +if USING_EXPORT_MAKEFILES + +install-exec-hook: + mkdir -p $(DESTDIR)$(includedir) + cp $(top_builddir)/Makefile.export.zoltan $(DESTDIR)$(includedir)/. + $(PERL_EXE) $(top_srcdir)/config/replace-install-prefix.pl \ + --exec-prefix=$(exec_prefix) \ + --my-export-makefile=Makefile.export.zoltan \ + --my-abs-top-srcdir=@abs_top_srcdir@ \ + --my-abs-incl-dirs=@abs_top_builddir@/src:@abs_top_srcdir@/src \ + --my-abs-lib-dirs=@abs_top_builddir@/src + $(PERL_EXE) $(top_srcdir)/config/generate-makeoptions.pl $(top_builddir)/src/Makefile ZOLTAN > $(DESTDIR)$(includedir)/Makefile.export.zoltan.macros + +uninstall-hook: + rm -f $(includedir)/Makefile.export.zoltan + rm -f $(includedir)/Makefile.export.zoltan.macros + +else + +install-exec-hook: + +uninstall-hook: + +endif + +## ####################################################################### + diff --git a/Zoltan-3.90/Makefile.export.zoltan.in b/Zoltan-3.90/Makefile.export.zoltan.in new file mode 100644 index 00000000..5217343f --- /dev/null +++ b/Zoltan-3.90/Makefile.export.zoltan.in @@ -0,0 +1,80 @@ +@HAVE_MPI_FALSE@SIMPI_INCDIR = -I@abs_top_srcdir@/siMPI/pyMPI/siMPI +@HAVE_MPI_FALSE@SIMPI_LIBS = -L@abs_top_builddir@/siMPI/pyMPI/siMPI -lsimpi + +ZOLTAN_INCLUDES = -I@abs_top_builddir@/src/include -I@abs_top_srcdir@/src/include $(SIMPI_INCDIR) + +#@BUILD_FORTRAN90DRIVER_TRUE@ZFORLIBS = @FCLIBS@ +@USE_FORTRAN_TRUE@ZFLIBS = @FLIBS@ +#The following line can be edited in the case that -lm is not available +@USE_FORTRAN_FALSE@ZFLIBS = -lm +#@BUILD_FORTRAN90DRIVER_FALSE@ZFORLIBS = $(ZFLIBS) +ZFORLIBS = $(ZFLIBS) + +ZOLTAN_LIBS = @LDFLAGS@ -L@abs_top_builddir@/src -lzoltan @LIBS@ $(ZFORLIBS) $(SIMPI_LIBS) + +ZOLTAN_LIBRARY_INCLUDES = -I@abs_top_builddir@/src/include -I@abs_top_srcdir@/src/include -I@abs_top_srcdir@/src/all -I@abs_top_srcdir@/src/coloring -I@abs_top_srcdir@/src/ha -I@abs_top_srcdir@/src/hier -I@abs_top_srcdir@/src/hsfc -I@abs_top_srcdir@/src/lb -I@abs_top_srcdir@/src/oct -I@abs_top_srcdir@/src/order -I@abs_top_srcdir@/src/par -I@abs_top_srcdir@/src/params -I@abs_top_srcdir@/src/tpls -I@abs_top_srcdir@/src/ccolamd -I@abs_top_srcdir@/src/phg -I@abs_top_srcdir@/src/rcb -I@abs_top_srcdir@/src/reftree -I@abs_top_srcdir@/src/timer -I@abs_top_srcdir@/src/Utilities/Communication -I@abs_top_srcdir@/src/Utilities/Timer -I@abs_top_srcdir@/src/Utilities/DDirectory -I@abs_top_srcdir@/src/Utilities/Memory -I@abs_top_srcdir@/src/Utilities/shared -I@abs_top_srcdir@/src/zz -I@abs_top_srcdir@/src/graph -I@abs_top_srcdir@/src/matrix -I@abs_top_srcdir@/src/simple $(SIMPI_INCDIR) + +ZOLTAN_DEPS = 
@abs_top_builddir@/src/libzoltan.a + +############################################################################ +# +# Export MPI settings +# +############################################################################ + +# MPI libraries +ZOLTAN_MPI_LIBS = @MPI_LIBS@ + +# MPI include path +ZOLTAN_MPI_INC = @MPI_INC@ + +# Path to MPI libraries +ZOLTAN_MPI_LIBDIR = @MPI_LIBDIR@ + +# Path to MPI root +ZOLTAN_MPI_DIR = @MPI_DIR@ + +############################################################################ +# +# Export Fortran libraries +# +############################################################################ + +# Fortran & standard math libs +#ZOLTAN_FCLIBS = @FCLIBS@ +ZOLTAN_FLIBS = $(ZFLIBS) + +# Extra libraries +ZOLTAN_EXTRA_LIBS = @LIBS@ + +############################################################################ +# +# Export compiler settings +# +############################################################################ + +# Extra definitions +ZOLTAN_DEFS = @DEFS@ + +# Fortran compilation flags +@BUILD_FORTRAN90DRIVER_TRUE@ZOLTAN_FCFLAGS = @FCFLAGS@ +@USE_FORTRAN_TRUE@ZOLTAN_FFLAGS = @FFLAGS@ + +# C compilation flags +ZOLTAN_CFLAGS = @CFLAGS@ + +# C++ compilation flags +ZOLTAN_CXXFLAGS = @CXXFLAGS@ + +# linker flags +ZOLTAN_LDFLAGS = @LDFLAGS@ + + +############################################################################ +# +# Export the ranlib and archiver to be used +# +############################################################################ + +ZOLTAN_RANLIB = @RANLIB@ + diff --git a/Zoltan-3.90/Makefile.in b/Zoltan-3.90/Makefile.in new file mode 100644 index 00000000..7cd0fba6 --- /dev/null +++ b/Zoltan-3.90/Makefile.in @@ -0,0 +1,1073 @@ +# Makefile.in generated by automake 1.11.3 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software +# Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# @HEADER +# +######################################################################## +# +# Zoltan Toolkit for Load-balancing, Partitioning, Ordering and Coloring +# Copyright 2012 Sandia Corporation +# +# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, +# the U.S. Government retains certain rights in this software. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# 3. Neither the name of the Corporation nor the names of the +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Questions? Contact Karen Devine kddevin@sandia.gov +# Erik Boman egboman@sandia.gov +# +######################################################################## +# +# @HEADER +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = . +DIST_COMMON = README $(am__configure_deps) $(srcdir)/Makefile.am \ + $(srcdir)/Makefile.export.zoltan.in $(srcdir)/Makefile.in \ + $(top_srcdir)/configure \ + $(top_srcdir)/src/include/Zoltan_config.h.in config/compile \ + config/config.guess config/config.sub config/depcomp \ + config/install-sh config/missing +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/config/ax_f90_module_case.m4 \ + $(top_srcdir)/config/ax_f90_module_flag.m4 \ + $(top_srcdir)/config/tac_arg_check_mpi.m4 \ + $(top_srcdir)/config/tac_arg_enable_export-makefiles.m4 \ + $(top_srcdir)/config/tac_arg_enable_feature.m4 \ + $(top_srcdir)/config/tac_arg_enable_feature_sub.m4 \ + $(top_srcdir)/config/tac_arg_enable_feature_sub_check.m4 \ + $(top_srcdir)/config/tac_arg_enable_option.m4 \ + $(top_srcdir)/config/tac_arg_with_3pl_sub.m4 \ + $(top_srcdir)/config/tac_arg_with_ar.m4 \ + $(top_srcdir)/config/tac_arg_with_flags.m4 \ + $(top_srcdir)/config/tac_arg_with_incdirs.m4 \ + $(top_srcdir)/config/tac_arg_with_libdirs.m4 \ + $(top_srcdir)/config/tac_arg_with_libs.m4 \ + $(top_srcdir)/config/tac_arg_with_package.m4 \ + $(top_srcdir)/config/tac_arg_with_perl.m4 \ + $(top_srcdir)/config/wk_fc_get_vendor.m4 \ + $(top_srcdir)/config/zac_arg_config_mpi.m4 \ + $(top_srcdir)/config/zac_arg_with_id.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ + configure.lineno config.status.lineno +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/src/include/Zoltan_config.h +CONFIG_CLEAN_FILES = Makefile.export.zoltan +CONFIG_CLEAN_VPATH_FILES = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ + html-recursive info-recursive install-data-recursive \ + install-dvi-recursive install-exec-recursive \ + install-html-recursive install-info-recursive \ + install-pdf-recursive 
install-ps-recursive install-recursive \ + installcheck-recursive installdirs-recursive pdf-recursive \ + ps-recursive uninstall-recursive +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ + $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ + distdir dist dist-all distcheck +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = siMPI src example +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +distdir = $(PACKAGE)-$(VERSION) +top_distdir = $(distdir) +am__remove_distdir = \ + if test -d "$(distdir)"; then \ + find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \ + && rm -rf "$(distdir)" \ + || { sleep 5 && rm -rf "$(distdir)"; }; \ + else :; fi +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +DIST_ARCHIVES = $(distdir).tar.gz +GZIP_ENV = --best +distuninstallcheck_listfiles = find . -type f -print +am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ + | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' +distcleancheck_listfiles = find . 
-type f -print +ACLOCAL = @ACLOCAL@ +ALTERNATE_AR = @ALTERNATE_AR@ +AMTAR = @AMTAR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EXEEXT = @EXEEXT@ +FC = @FC@ +FCFLAGS = @FCFLAGS@ +FCFLAGS_f = @FCFLAGS_f@ +FCFLAGS_f90 = @FCFLAGS_f90@ +FCLIBS = @FCLIBS@ +FC_MAJOR_VERSION = @FC_MAJOR_VERSION@ +FC_MODNAME = @FC_MODNAME@ +FC_MODNAME_Q = @FC_MODNAME_Q@ +FC_VENDOR = @FC_VENDOR@ +FC_VERSION = @FC_VERSION@ +FC_VERSION_STRING = @FC_VERSION_STRING@ +HAVE_PERL = @HAVE_PERL@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MPI_CC = @MPI_CC@ +MPI_CXX = @MPI_CXX@ +MPI_FC = @MPI_FC@ +MPI_RECV_LIMIT_FLAG = @MPI_RECV_LIMIT_FLAG@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PERL_EXE = @PERL_EXE@ +RANLIB = @RANLIB@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_aux_dir = @ac_aux_dir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_FC = @ac_ct_FC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +ACLOCAL_AMFLAGS = -I config +ZOLTAN_TESTS = \ + test/test_zoltan \ + test/runtests \ + test/ctest_zoltan.pl \ + test/ch_simple \ + test/hg_simple + +ZOLTAN_DOCS = \ + doc/Zoltan_html/Zoltan.html \ + doc/Zoltan_html/Zoltan_FAQ.html \ + doc/Zoltan_html/Zoltan_bugreport.html \ + doc/Zoltan_html/Zoltan_cite.html \ + doc/Zoltan_html/Zoltan_construction.html \ + doc/Zoltan_html/Zoltan_phil.html \ + doc/Zoltan_html/Zoltan_pubs.html \ + 
doc/Zoltan_html/dev_html/brack3d.png \ + doc/Zoltan_html/dev_html/dev.html \ + doc/Zoltan_html/dev_html/dev_add.html \ + doc/Zoltan_html/dev_html/dev_add_interface.html \ + doc/Zoltan_html/dev_html/dev_add_lb.html \ + doc/Zoltan_html/dev_html/dev_add_memory.html \ + doc/Zoltan_html/dev_html/dev_add_params.html \ + doc/Zoltan_html/dev_html/dev_add_remap.html \ + doc/Zoltan_html/dev_html/dev_add_struct.html \ + doc/Zoltan_html/dev_html/dev_cpp.html \ + doc/Zoltan_html/dev_html/dev_degenerate.html \ + doc/Zoltan_html/dev_html/dev_dist.html \ + doc/Zoltan_html/dev_html/dev_dist_compile.html \ + doc/Zoltan_html/dev_html/dev_dist_cvs.html \ + doc/Zoltan_html/dev_html/dev_dist_dir.html \ + doc/Zoltan_html/dev_html/dev_driver.html \ + doc/Zoltan_html/dev_html/dev_fortran.html \ + doc/Zoltan_html/dev_html/dev_hier.html \ + doc/Zoltan_html/dev_html/dev_hsfc.html \ + doc/Zoltan_html/dev_html/dev_intro.html \ + doc/Zoltan_html/dev_html/dev_intro_coding.html \ + doc/Zoltan_html/dev_html/dev_intro_philosophy.html \ + doc/Zoltan_html/dev_html/dev_intro_sqe.html \ + doc/Zoltan_html/dev_html/dev_lb.html \ + doc/Zoltan_html/dev_html/dev_lb_interface.html \ + doc/Zoltan_html/dev_html/dev_lb_structs.html \ + doc/Zoltan_html/dev_html/dev_lb_types.html \ + doc/Zoltan_html/dev_html/dev_mig.html \ + doc/Zoltan_html/dev_html/dev_parmetis.html \ + doc/Zoltan_html/dev_html/dev_phg.html \ + doc/Zoltan_html/dev_html/dev_rcb.html \ + doc/Zoltan_html/dev_html/dev_refs.html \ + doc/Zoltan_html/dev_html/dev_reftree.html \ + doc/Zoltan_html/dev_html/dev_rib.html \ + doc/Zoltan_html/dev_html/dev_services.html \ + doc/Zoltan_html/dev_html/dev_services_debug.html \ + doc/Zoltan_html/dev_html/dev_services_hash.html \ + doc/Zoltan_html/dev_html/dev_services_objlist.html \ + doc/Zoltan_html/dev_html/dev_services_parallel.html \ + doc/Zoltan_html/dev_html/dev_services_params.html \ + doc/Zoltan_html/dev_html/dev_services_time.html \ + doc/Zoltan_html/dev_html/dev_services_zoltantimer.html \ + doc/Zoltan_html/dev_html/dev_test_script.html \ + doc/Zoltan_html/dev_html/dev_view.html \ + doc/Zoltan_html/dev_html/film2d.png \ + doc/Zoltan_html/dev_html/hammondMesh.png \ + doc/Zoltan_html/dev_html/hammondPoints.png \ + doc/Zoltan_html/dev_html/zdrive.inp \ + doc/Zoltan_html/ug_html/figures/arrow.gif \ + doc/Zoltan_html/ug_html/figures/HGFigure.gif \ + doc/Zoltan_html/ug_html/figures/hierexample.fig \ + doc/Zoltan_html/ug_html/figures/hierexample.gif \ + doc/Zoltan_html/ug_html/figures/Z.gif \ + doc/Zoltan_html/ug_html/ug.html \ + doc/Zoltan_html/ug_html/ug_alg.html \ + doc/Zoltan_html/ug_html/ug_alg_block.html \ + doc/Zoltan_html/ug_html/ug_alg_geom.html \ + doc/Zoltan_html/ug_html/ug_alg_graph.html \ + doc/Zoltan_html/ug_html/ug_alg_hier.html \ + doc/Zoltan_html/ug_html/ug_alg_hsfc.html \ + doc/Zoltan_html/ug_html/ug_alg_hypergraph.html \ + doc/Zoltan_html/ug_html/ug_alg_jostle.html \ + doc/Zoltan_html/ug_html/ug_alg_oct.html \ + doc/Zoltan_html/ug_html/ug_alg_parkway.html \ + doc/Zoltan_html/ug_html/ug_alg_parmetis.html \ + doc/Zoltan_html/ug_html/ug_alg_patoh.html \ + doc/Zoltan_html/ug_html/ug_alg_phg.html \ + doc/Zoltan_html/ug_html/ug_alg_ptscotch.html \ + doc/Zoltan_html/ug_html/ug_alg_random.html \ + doc/Zoltan_html/ug_html/ug_alg_rcb.html \ + doc/Zoltan_html/ug_html/ug_alg_reftree.html \ + doc/Zoltan_html/ug_html/ug_alg_rib.html \ + doc/Zoltan_html/ug_html/ug_alg_simple.html \ + doc/Zoltan_html/ug_html/ug_backward.html \ + doc/Zoltan_html/ug_html/ug_color.html \ + doc/Zoltan_html/ug_html/ug_color_parallel.html \ + 
doc/Zoltan_html/ug_html/ug_cpp.html \ + doc/Zoltan_html/ug_html/ug_examples.html \ + doc/Zoltan_html/ug_html/ug_examples_init.html \ + doc/Zoltan_html/ug_html/ug_examples_lb.html \ + doc/Zoltan_html/ug_html/ug_examples_mig.html \ + doc/Zoltan_html/ug_html/ug_examples_query.html \ + doc/Zoltan_html/ug_html/ug_fortran.html \ + doc/Zoltan_html/ug_html/ug_graph_vs_hg.html \ + doc/Zoltan_html/ug_html/ug_index.html \ + doc/Zoltan_html/ug_html/ug_interface.html \ + doc/Zoltan_html/ug_html/ug_interface_augment.html \ + doc/Zoltan_html/ug_html/ug_interface_color.html \ + doc/Zoltan_html/ug_html/ug_interface_init.html \ + doc/Zoltan_html/ug_html/ug_interface_lb.html \ + doc/Zoltan_html/ug_html/ug_interface_mig.html \ + doc/Zoltan_html/ug_html/ug_interface_order.html \ + doc/Zoltan_html/ug_html/ug_intro.html \ + doc/Zoltan_html/ug_html/ug_order.html \ + doc/Zoltan_html/ug_html/ug_order_parmetis.html \ + doc/Zoltan_html/ug_html/ug_order_ptscotch.html \ + doc/Zoltan_html/ug_html/ug_param.html \ + doc/Zoltan_html/ug_html/ug_query.html \ + doc/Zoltan_html/ug_html/ug_query_lb.html \ + doc/Zoltan_html/ug_html/ug_query_mig.html \ + doc/Zoltan_html/ug_html/ug_refs.html \ + doc/Zoltan_html/ug_html/ug_release.html \ + doc/Zoltan_html/ug_html/ug_usage.html \ + doc/Zoltan_html/ug_html/ug_util.html \ + doc/Zoltan_html/ug_html/ug_util_comm.html \ + doc/Zoltan_html/ug_html/ug_util_dd.html \ + doc/Zoltan_html/ug_html/ug_util_mem.html + +ZOLTAN_SAMPLE = \ + SampleConfigurationScripts/mac_osX_no_fortran \ + SampleConfigurationScripts/linux_with_purify_zoltan_only \ + SampleConfigurationScripts/mac_osX_zoltan_only \ + SampleConfigurationScripts/linux_zoltan_only \ + SampleConfigurationScripts/linux_trilinos_runtests \ + SampleConfigurationScripts/linux_zoltan_dist \ + SampleConfigurationScripts/mac_osX_zoltan_dist + +ZOLTAN_CMAKE = \ + CMakeLists.txt \ + src/CMakeLists.txt \ + src/driver/CMakeLists.txt \ + src/fdriver/CMakeLists.txt \ + test/CMakeLists.txt \ + test/ch_simple/CMakeLists.txt \ + test/hg_simple/CMakeLists.txt \ + cmake/Dependencies.cmake \ + cmake/Zoltan_config.h.in + +EXTRA_DIST = config/generate-makeoptions.pl \ + README.html README.txt \ + config/strip_dup_incl_paths.pl config/strip_dup_libs.pl \ + config/replace-install-prefix.pl config/string-replace.pl \ + config/token-replace.pl \ + Disclaimer GNU_Lesser_GPL.txt Known_Problems VERSION \ + $(ZOLTAN_DOCS) $(ZOLTAN_SAMPLE) $(ZOLTAN_TESTS) + +AUX_DIST = config/install-sh config/missing config/mkinstalldirs +MAINTAINERCLEANFILES = Makefile.in aclocal.m4 autom4te.cache/* \ + configure config.status config.log \ + src/common/config-h.in src/common/stamp-h.in \ + $(AUX_DIST) + + +#We now build tests and examples through separate make targets, rather than +#during "make". We still need to conditionally include the test and example +#in SUBDIRS, even though BUILD_TESTS and BUILD_EXAMPLES will never be +#defined, so that the tests and examples are included in the distribution +#tarball. + +#Add this later +@SUB_TEST_TRUE@TEST_SUBDIR = +@SUB_EXAMPLE_TRUE@EXAMPLE_SUBDIR = example +@HAVE_MPI_FALSE@SIMPI_SUBDIR = siMPI +@HAVE_MPI_TRUE@SIMPI_SUBDIR = +SUBDIRS = $(SIMPI_SUBDIR) src $(EXAMPLE_SUBDIR) $(TEST_SUBDIR) +TRILINOS_HOME_DIR = @abs_top_srcdir@/../.. +TRILINOS_BUILD_DIR = @abs_top_builddir@/../.. 
+TRILINOS_MPI_MAX_PROC = 4 +TRILINOS_TEST_CATEGORY = INSTALL +all: all-recursive + +.SUFFIXES: +am--refresh: Makefile + @: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \ + $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + echo ' $(SHELL) ./config.status'; \ + $(SHELL) ./config.status;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + $(SHELL) ./config.status --recheck + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + $(am__cd) $(srcdir) && $(AUTOCONF) +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) +$(am__aclocal_m4_deps): + +src/include/Zoltan_config.h: src/include/stamp-h1 + @if test ! -f $@; then rm -f src/include/stamp-h1; else :; fi + @if test ! -f $@; then $(MAKE) $(AM_MAKEFLAGS) src/include/stamp-h1; else :; fi + +src/include/stamp-h1: $(top_srcdir)/src/include/Zoltan_config.h.in $(top_builddir)/config.status + @rm -f src/include/stamp-h1 + cd $(top_builddir) && $(SHELL) ./config.status src/include/Zoltan_config.h +$(top_srcdir)/src/include/Zoltan_config.h.in: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) + rm -f src/include/stamp-h1 + touch $@ + +distclean-hdr: + -rm -f src/include/Zoltan_config.h src/include/stamp-h1 +Makefile.export.zoltan: $(top_builddir)/config.status $(srcdir)/Makefile.export.zoltan.in + cd $(top_builddir) && $(SHELL) ./config.status $@ + +# This directory's subdirectories are mostly independent; you can cd +# into them and run `make' without going through this Makefile. +# To change the values of `make' variables: instead of editing Makefiles, +# (1) if the variable is set in `config.status', edit `config.status' +# (which will cause the Makefiles to be regenerated when you run `make'); +# (2) otherwise, pass the desired values on the `make' command line. 
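# (Editorial example, not generated by automake: option (2) above means a plain
#  command-line override, e.g.
#      make CFLAGS="-O2 -g" TRILINOS_MPI_MAX_PROC=8
#  command-line assignments take precedence over the definitions in this Makefile
#  for that single invocation.)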
+$(RECURSIVE_TARGETS): + @fail= failcom='exit 1'; \ + for f in x $$MAKEFLAGS; do \ + case $$f in \ + *=* | --[!k]*);; \ + *k*) failcom='fail=yes';; \ + esac; \ + done; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +$(RECURSIVE_CLEAN_TARGETS): + @fail= failcom='exit 1'; \ + for f in x $$MAKEFLAGS; do \ + case $$f in \ + *=* | --[!k]*);; \ + *k*) failcom='fail=yes';; \ + esac; \ + done; \ + dot_seen=no; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + rev=''; for subdir in $$list; do \ + if test "$$subdir" = "."; then :; else \ + rev="$$subdir $$rev"; \ + fi; \ + done; \ + rev="$$rev ."; \ + target=`echo $@ | sed s/-recursive//`; \ + for subdir in $$rev; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done && test -z "$$fail" +tags-recursive: + list='$(SUBDIRS)'; for subdir in $$list; do \ + test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ + done +ctags-recursive: + list='$(SUBDIRS)'; for subdir in $$list; do \ + test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ + done + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! 
-f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: CTAGS +CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + $(am__remove_distdir) + test -d "$(distdir)" || mkdir "$(distdir)" + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done + -test -n "$(am__skip_mode_fix)" \ + || find "$(distdir)" -type d ! -perm -755 \ + -exec chmod u+rwx,go+rx {} \; -o \ + ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ + ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ + ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ + || chmod -R a+r "$(distdir)" +dist-gzip: distdir + tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz + $(am__remove_distdir) + +dist-bzip2: distdir + tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 + $(am__remove_distdir) + +dist-lzip: distdir + tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz + $(am__remove_distdir) + +dist-lzma: distdir + tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma + $(am__remove_distdir) + +dist-xz: distdir + tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz + $(am__remove_distdir) + +dist-tarZ: distdir + tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z + $(am__remove_distdir) + +dist-shar: distdir + shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz + $(am__remove_distdir) + +dist-zip: distdir + -rm -f $(distdir).zip + zip -rq $(distdir).zip $(distdir) + $(am__remove_distdir) + +dist dist-all: distdir + tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz + $(am__remove_distdir) + +# This target untars the dist file and tries a VPATH configuration. Then +# it guarantees that the distribution is self-contained by making another +# tarfile. 
+distcheck: dist + case '$(DIST_ARCHIVES)' in \ + *.tar.gz*) \ + GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\ + *.tar.bz2*) \ + bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ + *.tar.lzma*) \ + lzma -dc $(distdir).tar.lzma | $(am__untar) ;;\ + *.tar.lz*) \ + lzip -dc $(distdir).tar.lz | $(am__untar) ;;\ + *.tar.xz*) \ + xz -dc $(distdir).tar.xz | $(am__untar) ;;\ + *.tar.Z*) \ + uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ + *.shar.gz*) \ + GZIP=$(GZIP_ENV) gzip -dc $(distdir).shar.gz | unshar ;;\ + *.zip*) \ + unzip $(distdir).zip ;;\ + esac + chmod -R a-w $(distdir); chmod a+w $(distdir) + mkdir $(distdir)/_build + mkdir $(distdir)/_inst + chmod a-w $(distdir) + test -d $(distdir)/_build || exit 0; \ + dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ + && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ + && am__cwd=`pwd` \ + && $(am__cd) $(distdir)/_build \ + && ../configure --srcdir=.. --prefix="$$dc_install_base" \ + $(AM_DISTCHECK_CONFIGURE_FLAGS) \ + $(DISTCHECK_CONFIGURE_FLAGS) \ + && $(MAKE) $(AM_MAKEFLAGS) \ + && $(MAKE) $(AM_MAKEFLAGS) dvi \ + && $(MAKE) $(AM_MAKEFLAGS) check \ + && $(MAKE) $(AM_MAKEFLAGS) install \ + && $(MAKE) $(AM_MAKEFLAGS) installcheck \ + && $(MAKE) $(AM_MAKEFLAGS) uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ + distuninstallcheck \ + && chmod -R a-w "$$dc_install_base" \ + && ({ \ + (cd ../.. && umask 077 && mkdir "$$dc_destdir") \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ + distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ + } || { rm -rf "$$dc_destdir"; exit 1; }) \ + && rm -rf "$$dc_destdir" \ + && $(MAKE) $(AM_MAKEFLAGS) dist \ + && rm -rf $(DIST_ARCHIVES) \ + && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \ + && cd "$$am__cwd" \ + || exit 1 + $(am__remove_distdir) + @(echo "$(distdir) archives ready for distribution: "; \ + list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ + sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' +distuninstallcheck: + @test -n '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: trying to run $@ with an empty' \ + '$$(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + $(am__cd) '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left after uninstall:" ; \ + if test -n "$(DESTDIR)"; then \ + echo " (check DESTDIR support)"; \ + fi ; \ + $(distuninstallcheck_listfiles) ; \ + exit 1; } >&2 +distcleancheck: distclean + @if test '$(srcdir)' = . 
; then \ + echo "ERROR: distcleancheck can only run from a VPATH build" ; \ + exit 1 ; \ + fi + @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left in build directory after distclean:" ; \ + $(distcleancheck_listfiles) ; \ + exit 1; } >&2 +check-am: all-am +check: check-recursive +all-am: Makefile all-local +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-recursive + +clean-am: clean-generic mostlyclean-am + +distclean: distclean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-hdr distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: + @$(NORMAL_INSTALL) + $(MAKE) $(AM_MAKEFLAGS) install-exec-hook +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf $(top_srcdir)/autom4te.cache + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: + @$(NORMAL_INSTALL) + $(MAKE) $(AM_MAKEFLAGS) uninstall-hook +.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ + install-am install-exec-am install-strip tags-recursive \ + uninstall-am + +.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ + all all-am all-local am--refresh check check-am clean \ + clean-generic ctags ctags-recursive dist dist-all dist-bzip2 \ + dist-gzip dist-lzip dist-lzma dist-shar dist-tarZ dist-xz \ + dist-zip distcheck distclean distclean-generic distclean-hdr \ + distclean-tags distcleancheck distdir distuninstallcheck dvi \ + dvi-am html html-am info info-am install install-am \ + install-data install-data-am install-dvi install-dvi-am \ + install-exec install-exec-am install-exec-hook install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + 
install-strip installcheck installcheck-am installdirs \ + installdirs-am maintainer-clean maintainer-clean-generic \ + mostlyclean mostlyclean-generic pdf pdf-am ps ps-am tags \ + tags-recursive uninstall uninstall-am uninstall-hook + + +#The following line helps the test harness recover from build errors. + +all-local: + @echo "" + @echo "Trilinos package zoltan built successfully." + @echo "" + +@BUILD_TESTS_TRUE@tests: examples +@BUILD_TESTS_TRUE@ @echo "" +@BUILD_TESTS_TRUE@ @echo "Now building zoltan tests." +@BUILD_TESTS_TRUE@ @echo "" +@BUILD_TESTS_TRUE@ @echo "Zoltan tests use drivers and input files; no compilation necessary." +@BUILD_TESTS_TRUE@ @echo "" +@BUILD_TESTS_TRUE@ @echo "Finished building zoltan tests." +@BUILD_TESTS_TRUE@ @echo "" +@BUILD_TESTS_FALSE@tests: +@BUILD_TESTS_FALSE@ @echo "zoltan tests were disabled at configure time" + +@BUILD_EXAMPLES_TRUE@examples: +@BUILD_EXAMPLES_TRUE@ @echo "" +@BUILD_EXAMPLES_TRUE@ @echo "Now building zoltan examples." +@BUILD_EXAMPLES_TRUE@ @echo "" +@BUILD_EXAMPLES_TRUE@ cd $(top_builddir)/example && $(MAKE) +@BUILD_EXAMPLES_TRUE@ @echo "" +@BUILD_EXAMPLES_TRUE@ @echo "Finished building zoltan examples." +@BUILD_EXAMPLES_TRUE@ @echo "" + +@BUILD_EXAMPLES_TRUE@install-examples: +@BUILD_EXAMPLES_TRUE@ cd $(top_builddir)/example && $(MAKE) install +@BUILD_EXAMPLES_FALSE@examples: +@BUILD_EXAMPLES_FALSE@ @echo "zoltan examples were disabled at configure time" + +@BUILD_EXAMPLES_FALSE@install-examples: +@BUILD_EXAMPLES_FALSE@ @echo "zoltan examples were disabled at configure time" + +clean-tests: + cd $(top_builddir)/test && $(MAKE) clean + +clean-examples: + cd $(top_builddir)/example && $(MAKE) clean + +everything: + $(MAKE) && $(MAKE) examples && $(MAKE) tests + +clean-everything: + $(MAKE) clean-examples && $(MAKE) clean-tests && $(MAKE) clean + +install-everything: + $(MAKE) install && $(MAKE) install-examples + +runtests-serial : + $(PERL_EXE) $(TRILINOS_HOME_DIR)/commonTools/test/utilities/runtests \ + --trilinos-dir=$(TRILINOS_HOME_DIR) \ + --comm=serial \ + --build-dir=$(TRILINOS_BUILD_DIR) \ + --category=$(TRILINOS_TEST_CATEGORY) \ + --output-dir=@abs_top_builddir@/test/runtests-results \ + --verbosity=1 \ + --packages=zoltan + +runtests-mpi : + $(PERL_EXE) $(TRILINOS_HOME_DIR)/commonTools/test/utilities/runtests \ + --trilinos-dir=$(TRILINOS_HOME_DIR) \ + --comm=mpi \ + --mpi-go=$(TRILINOS_MPI_GO) \ + --build-dir=$(TRILINOS_BUILD_DIR) \ + --max-proc=$(TRILINOS_MPI_MAX_PROC) \ + --category=$(TRILINOS_TEST_CATEGORY) \ + --output-dir=@abs_top_builddir@/test/runtests-results \ + --verbosity=1 \ + --packages=zoltan + +@USING_EXPORT_MAKEFILES_TRUE@install-exec-hook: +@USING_EXPORT_MAKEFILES_TRUE@ mkdir -p $(DESTDIR)$(includedir) +@USING_EXPORT_MAKEFILES_TRUE@ cp $(top_builddir)/Makefile.export.zoltan $(DESTDIR)$(includedir)/. 
+@USING_EXPORT_MAKEFILES_TRUE@ $(PERL_EXE) $(top_srcdir)/config/replace-install-prefix.pl \ +@USING_EXPORT_MAKEFILES_TRUE@ --exec-prefix=$(exec_prefix) \ +@USING_EXPORT_MAKEFILES_TRUE@ --my-export-makefile=Makefile.export.zoltan \ +@USING_EXPORT_MAKEFILES_TRUE@ --my-abs-top-srcdir=@abs_top_srcdir@ \ +@USING_EXPORT_MAKEFILES_TRUE@ --my-abs-incl-dirs=@abs_top_builddir@/src:@abs_top_srcdir@/src \ +@USING_EXPORT_MAKEFILES_TRUE@ --my-abs-lib-dirs=@abs_top_builddir@/src +@USING_EXPORT_MAKEFILES_TRUE@ $(PERL_EXE) $(top_srcdir)/config/generate-makeoptions.pl $(top_builddir)/src/Makefile ZOLTAN > $(DESTDIR)$(includedir)/Makefile.export.zoltan.macros + +@USING_EXPORT_MAKEFILES_TRUE@uninstall-hook: +@USING_EXPORT_MAKEFILES_TRUE@ rm -f $(includedir)/Makefile.export.zoltan +@USING_EXPORT_MAKEFILES_TRUE@ rm -f $(includedir)/Makefile.export.zoltan.macros + +@USING_EXPORT_MAKEFILES_FALSE@install-exec-hook: + +@USING_EXPORT_MAKEFILES_FALSE@uninstall-hook: + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/Zoltan-3.90/README b/Zoltan-3.90/README new file mode 100644 index 00000000..6eaf0949 --- /dev/null +++ b/Zoltan-3.90/README @@ -0,0 +1,94 @@ +# @HEADER +# +######################################################################## +# +# Zoltan Toolkit for Load-balancing, Partitioning, Ordering and Coloring +# Copyright 2012 Sandia Corporation +# +# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, +# the U.S. Government retains certain rights in this software. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# 3. Neither the name of the Corporation nor the names of the +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Questions? Contact Karen Devine kddevin@sandia.gov +# Erik Boman egboman@sandia.gov +# +######################################################################## +# +# @HEADER +@HEADER + +********************************************************************** + + Zoltan Toolkit for Load-balancing, Partitioning, Ordering and Coloring + Copyright 2012 Sandia Corporation + +Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, +the U.S. 
Government retains certain rights in this software. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither the name of the Corporation nor the names of the +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Questions? Contact Karen Devine kddevin@sandia.gov + Erik Boman egboman@sandia.gov + +@HEADER + +############################################################################## + +INSTALLATION +------------ +Instructions for building and installing Zoltan using CMAKE or Autotools +are at the following web site: + + http://www.cs.sandia.gov/zoltan/ug_html/ug_usage.html + diff --git a/Zoltan-3.90/README.developer b/Zoltan-3.90/README.developer new file mode 100644 index 00000000..bc2e5fb7 --- /dev/null +++ b/Zoltan-3.90/README.developer @@ -0,0 +1,166 @@ +# @HEADER +# +######################################################################## +# +# Zoltan Toolkit for Load-balancing, Partitioning, Ordering and Coloring +# Copyright 2012 Sandia Corporation +# +# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, +# the U.S. Government retains certain rights in this software. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# 3. Neither the name of the Corporation nor the names of the +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL SANDIA CORPORATION OR THE +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Questions? Contact Karen Devine kddevin@sandia.gov +# Erik Boman egboman@sandia.gov +# +######################################################################## +# +# @HEADER +lriesen@sandia.gov +November 29, 2010 + +Notes on typedefs that were just introduced in order to support 64-bit global counts and IDs: + +ZOLTAN_ID_TYPE: +============== + +A ZOLTAN_ID_TYPE is still the type that we use for global IDs passed in by the user. However now the ZOLTAN_ID_TYPE can be set at configuration time. It can be unsigned int, unsigned long, or unsigned long long. The default is unsigned int. + +In CMake you can choose: + +-D Zoltan_ENABLE_UINT_IDS:Bool=ON +-D Zoltan_ENABLE_ULONG_IDS:Bool=ON +-D Zoltan_ENABLE_ULLONG_IDS:Bool=ON + +Using autoconf it's: + +--with-id-type=uint +--with-id-type=ulong +--with-id-type=ullong + +To print a ZOLTAN_ID_TYPE use ZOLTAN_ID_SPEC: + +printf("GID: " ZOLTAN_ID_SPEC ", LID %d\n", my_gid, my_lid); + +To send a ZOLTAN_ID_TYPE in an MPI message, use ZOLTAN_ID_MPI_TYPE: + +MPI_Bcast(&gid, 1, ZOLTAN_ID_MPI_TYPE, 0, MPI_COMM_WORLD); + +To silence compiler warnings, you can properly specify a constant of type ZOLTAN_ID_TYPE using ZOLTAN_ID_CONSTANT: + +a = ZOLTAN_ID_CONSTANT(99) + +ZOLTAN_GNO_TYPE: +=============== +ZOLTAN_GNO_TYPE is a new typedef which Zoltan uses for global counts and for internal global IDs regardless of the definition of ZOLTAN_ID_TYPE. It is defined to be ssize_t, so it is signed and it will be 64 bits on a 64-bit architecture and 32 bits on a 32-bit architecture. + +The MPI_Datatype for ZOLTAN_GNO_TYPE is returned by Zoltan_mpi_gno_type(). + +Any time that Zoltan creates a new problem from the application supplied problem it uses ZOLTAN_GNO_TYPE for the object IDs. + +indextype and weighttype: +======================== + +At compile time, the Zoltan library defines indextype and weighttype to coincide with the third party graph libraries, if any, that it has been configured to work with. + +The "indextype" will be the data type used by the third party library (TPL) for global IDs. + +The "weighttype" will be the data type used by the TPL for weights. + +Based on configure-time parameters, Zoltan will recognize and correcty set types for Metis, ParMetis, 32- or 64-bit Scotch or 32- or 64-bit PTScotch. + +As a developer, it is important to know where the boundary is in the code for the use of Zoltan's types (ZOLTAN_ID_TYPE, ZOLTAN_GNO_TYPE, float) versus the types used by the TPL (indextype, weighttype). A simplified explanation is that indextype and weighttype are only used in source code that is in the "tpls" directory. + +More specifically, it goes like this: + +1. Zoltan calls the application query functions to obtain the graph using Zoltan's data types for IDs and weights. + +2. Zoltan builds graph and matrix structures in Zoltan_ZG_Build using Zoltan's data types. (I suspect the "ZG" stands for "Zoltan Graph"). + +3. The graph is exported to the TPL data structures in Zoltan_ZG_Export. 
This is the point where arrays are converted if necessary to the data types used by the third party libraries. The C-structures with names like ZOLTAN_Third_* and others found in tpls/third_library.h use the indextype and weighttype data types. + +4. Of course the TPL is called with indextype and weighttype objects. + +5. Zoltan_Postprocess_Graph is called on the TPL structures and writes the ZOLTAN_Output_Part structure which uses Zoltan's data types. + +6. Zoltan_Third_Export_User uses the ZOLTAN_Output_Part structure to write the part assignments to the structures returned to the user. + +To print a indextype or weighttype use TPL_IDX_SPEC or TPL_WGT_SPEC respectively. + +If the TPL weight type is a floating point type, then TPL_FLOAT_WEIGHT will be defined. + +If the TPL weight type is an integral type, then TPL_INTEGRAL_WEIGHT will be defined. + +Other useful TPL configuration macro definitions can be found in tpls/third_library_const.h. + +Zoltan can be configured to use both Scotch and ParMetis as long as the 32-bit version of Scotch is used. + +Assumptions: +=========== + +sizeof(ZOLTAN_GNO_TYPE) >= sizeof(ZOLTAN_ID_TYPE) +sizeof(ZOLTAN_ID_TYPE) >= sizeof(int) + +Some changes to support 64-bit IDs: +================================== + +Zoltan_Map_Create() used to assume it was handling keys that were multiples of ZOLTAN_ID_TYPEs. Now you supply the number of bytes in the key, not the number of ZOLTAN_ID_TYPEs. + +Because testing of this branch involves running large memory problems, I added the function Zoltan_write_linux_meminfo() which will write out the contents of /proc/meminfo on a Linux machine. The new function Zoltan_Memory_Get_Debug() returns the debug level set in mem.c by Zoltan_Memory_Debug(). zdrive has a new input option + + zoltan memory debug level = n + +which will set the debug level. Then after partitioning, zdrive checks the debug level and if there was an error and it is running on a linux machine it will dump out /proc/meminfo. + +I modified the configure script to define HOST_LINUX on a linux machine. + +I wrote three tests in tests/Large_Data that test PHG, RCB and RIB with arbitrarily large numbers of objects. They have signal handlers that call Zoltan_write_linux_meminfo() on a Linux machine. One test can be configured to use 64-bit IDs when it has less than 2*10^9 IDs. + +Limitations: +=========== +The reftree and oct methods have not been converted to support 64-bit global IDs and global numbers. + +INTERESTING CHART: +================= + +32 and 64 bit data models (ILP - integer/long/pointer): + +type LP32 ILP32 ILP64 LLP64 LP64 + +char 8 8 8 8 8 +short 16 16 16 16 16 +_int32 32 +int 16 32 64 32 32 +long 32 32 64 32 64 +long long 64 +pointer 32 32 64 64 64 + +ILP32 is most widely used. +LP64 is most widely used. + +LLP64 is ILP32 with new 64 bit int added to it - used for Win64. diff --git a/Zoltan-3.90/README.md b/Zoltan-3.90/README.md new file mode 100644 index 00000000..74cb9197 --- /dev/null +++ b/Zoltan-3.90/README.md @@ -0,0 +1,38 @@ +# Zoltan +Zoltan Dynamic Load Balancing and Graph Algorithm Toolkit -- Distribution site + +The most up-to-date version of Zoltan is in the Trilinos framework at https://github.com/trilinos/Trilinos. + +This site provides stand-alone releases of Zoltan, separate from Trilinos: https://github.com/sandialabs/Zoltan/releases. Stand-alone releases of Zoltan may lag the Trilinos repo code. 
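As a quick illustration of the ID-type macros documented in README.developer above (ZOLTAN_ID_SPEC, ZOLTAN_ID_MPI_TYPE, ZOLTAN_ID_CONSTANT), here is a minimal C sketch that builds a global ID constant, broadcasts it, and prints it in a way that stays correct for any configured width of ZOLTAN_ID_TYPE. This sketch is editorial and not part of the Zoltan distribution; it assumes an MPI-enabled build and that including zoltan.h makes the ID-type macros visible.

/* Illustrative sketch only: exercises the ID-type macros described in
 * README.developer (ZOLTAN_ID_SPEC, ZOLTAN_ID_MPI_TYPE, ZOLTAN_ID_CONSTANT).
 * Assumes an MPI build and that zoltan.h provides these definitions. */
#include <stdio.h>
#include <mpi.h>
#include "zoltan.h"

int main(int argc, char **argv)
{
  int rank;
  ZOLTAN_ID_TYPE gid;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  /* A constant of type ZOLTAN_ID_TYPE, regardless of the configured width. */
  gid = ZOLTAN_ID_CONSTANT(99);

  /* Broadcast the ID using the matching MPI datatype. */
  MPI_Bcast(&gid, 1, ZOLTAN_ID_MPI_TYPE, 0, MPI_COMM_WORLD);

  /* Print the ID using the matching format specifier. */
  printf("rank %d: gid = " ZOLTAN_ID_SPEC "\n", rank, gid);

  MPI_Finalize();
  return 0;
}

Because the same source works whether Zoltan was configured with uint, ulong, or ullong IDs, this pattern avoids the compiler warnings and truncation bugs that hard-coded "%u"/unsigned int assumptions would introduce.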
+ +You can download individual releases of Zoltan from this site, or clone the Trilinos repository +https://github.com/trilinos/Trilinos to get the most up-to-date version of Zoltan. Individual releases are tarballs that can be unzipped and built with autotools/make. + +Trilinos clones include Zoltan in directory Trilinos/packages/zoltan. In this directory, you can +build Zoltan separately from Trilinos using autotools/make. Or in the Trilinos repository, +you can build Zoltan using Trilinos' cmake system. + +See https://htmlpreview.github.io/?https://github.com/sandialabs/zoltan/blob/master/doc/Zoltan_html/ug_html/ug_usage.html +for details on building Zoltan. + +The main Zoltan page is http://cs.sandia.gov/Zoltan. + +Release history: https://htmlpreview.github.io/?https://github.com/sandialabs/zoltan/blob/master/doc/Zoltan_html/ug_html/ug_release.html +* Version 3.90 (4/08/21; as in Trilinos v13; d328e0e2a8a5c48a4e01d6541cd8c0eb7f364823) +* Version 3.83 (1/28/16; as in Trilinos v12.6; aaf328db7e43001ee2d3148f72f12147e51c3293) +* Version 3.82 (5/1/15; as in Trilinos v12) +* Version 3.81 (11/06/14; as in Trilinos v11.12.2) +* Version 3.8 (10/28/13; as in Trilinos v10.11) +* Version 3.6 (11/2/11; as in Trilinos v10.8) +* Version 3.501 (5/12/11; as in Trilinos v10.6 with additions) +* Version 3.3 (7/31/10; as in Trilinos v10.4) +* Version 3.2 (10/5/09) +* Version 3.1 (9/30/08) +* Version 3.0 (5/30/07) +* Version 2.1 (10/05/06) +* Version 2.0 (7/17/06) +* Version 1.5 (5/29/03) +* Version 1.4 (6/18/02) +* Version 1.3 (3/27/02) -- the Original + +Questions? Email zoltan-dev@software.sandia.gov diff --git a/Zoltan-3.90/ReleaseNotes.txt b/Zoltan-3.90/ReleaseNotes.txt new file mode 100644 index 00000000..c78a26ab --- /dev/null +++ b/Zoltan-3.90/ReleaseNotes.txt @@ -0,0 +1,60 @@ +Zoltan release notes for Trilinos v12.8 + +Improved robustness of RCB partitioner for problems where many objects have +weight = 0 (e.g., PIC codes). Convergence is faster and the stopping +criteria are more robust. + +Fixed bug that occurred when RETURN_LIST=PARTS and (Num_GID > 1 or Num_LID > 1); +GIDs and LIDs are now copied correctly into return lists. + +Fixed a bug related to struct padding in the siMPI serial MPI interface. + +----------------------- + +Zoltan release notes for Trilinos v12.6 + +Minor code cleanup and bug fixes. + +New Zoltan_Get_Fn interface returning pointers to callback functions. +See zoltan/src/include/zoltan.h for details. + +Closest stand-alone Zoltan release is v3.83. +http://www.cs.sandia.gov/Zoltan + +----------------------- + +Zoltan release notes for Trilinos v11.8 + +Revised Scotch TPL specification in Trilinos' CMake environment to link +with all libraries needed by Scotch v6. + +Fixed bug in interface to ParMETIS v4 when multiple vertex weights are used. +Fixed bug in interface to Scotch when some processor has no vertices. + +----------------------- + +Zoltan release notes for Trilinos v11. + +Highlights are listed below; for more details, see +Trilinos/packages/zoltan/doc/Zoltan_html/ug_html/ug_release.html . + +- Zoltan is now released under Trilinos' BSD license. + +- The following Zoltan features are no longer supported in Trilinos v11: + + Zoltan v1 interface (as described in Zoltan include file lbi_const.h) + + Partitioning method OCTPART: use partitioning method HSFC instead. + + +- Hierarchical partitioning received several performance, interface and + testing improvements. 
An easier-to-use interface has been + added using simple parameters (HIER_ASSIST, PLATFORM_NAME, TOPOLOGY) + instead of callback functions; the callback function interface is still + supported. + +- Memory usage in Zoltan Distributed Data Directories is improved, leading to + faster execution times for data directories and hierarchical partitioning. + +- Compilation with gcc 4.7 is now supported. + +- Zoltan supports PT-Scotch v5.1.12 and ParMETIS v4, as well as some older + versions of these TPLs. diff --git a/Zoltan-3.90/SampleCmakeScripts/linux_zoltan_gid_64_only b/Zoltan-3.90/SampleCmakeScripts/linux_zoltan_gid_64_only new file mode 100755 index 00000000..c77faac4 --- /dev/null +++ b/Zoltan-3.90/SampleCmakeScripts/linux_zoltan_gid_64_only @@ -0,0 +1,31 @@ +#!/bin/csh +# Sample script for building using CMAKE on linux workstation octopi. +# 64-bit, Zoltan only. + +# Clean up the mess from previous configurations. +/bin/rm -r cmake* CMake* CPack* CTest* Dart* Trilinos* Testing packages Makefile OUT* + +cmake \ +-D CMAKE_INSTALL_PREFIX:FILEPATH="/home/lriesen/projects/Trilinos/build" \ +-D TPL_ENABLE_MPI:BOOL=ON \ +-D CMAKE_C_FLAGS:STRING="-m64 -g -DZOLTAN_ID_TYPE_LONG -std=c99" \ +-D CMAKE_CXX_FLAGS:STRING="-m64 -g" \ +-D CMAKE_Fortran_FLAGS:STRING="-m64 -g" \ +-D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \ +-D MPIEXEC_MAX_NUMPROCS:STRING=11 \ +-D Trilinos_ENABLE_ALL_PACKAGES:BOOL=OFF \ +-D Trilinos_ENABLE_EXAMPLES:BOOL=ON \ +-D Trilinos_VERBOSE_CONFIGURE:BOOL=ON \ +-D Trilinos_ENABLE_Zoltan:BOOL=ON \ +-D Zoltan_ENABLE_EXAMPLES:BOOL=ON \ +-D Zoltan_ENABLE_TESTS:BOOL=ON \ +-D Zoltan_ENABLE_ParMETIS:BOOL=ON \ +-D ParMETIS_INCLUDE_DIRS:FILEPATH="/Net/local/proj/zoltan/arch/all/src/ParMETIS3_1" \ +-D ParMETIS_LIBRARY_DIRS:FILEPATH="/Net/local/proj/zoltan/arch/linux64/lib/openmpi/ParMETIS3_1" \ +-D Zoltan_ENABLE_Scotch:BOOL=OFF \ +-D Zoltan_ENABLE_PaToH:BOOL=OFF \ +.. |& tee OUTPUT.CMAKE + +make |& tee OUTPUT.MAKE +make install |& tee OUTPUT.INSTALL + diff --git a/Zoltan-3.90/SampleCmakeScripts/linux_zoltan_only b/Zoltan-3.90/SampleCmakeScripts/linux_zoltan_only new file mode 100755 index 00000000..e671d677 --- /dev/null +++ b/Zoltan-3.90/SampleCmakeScripts/linux_zoltan_only @@ -0,0 +1,36 @@ +#!/bin/csh +# Sample script for building using CMAKE on linux workstation octopi. +# 64-bit, Zoltan only. +# Run in Trilinos/Obj_cmake. + +# Clean up the mess from previous configurations. 
+/bin/rm -r cmake* CMake* CPack* CTest* Dart* Trilinos* Testing packages Makefile + +cmake \ +-D CMAKE_INSTALL_PREFIX:FILEPATH="/Net/local/homes/kddevin/code/trilinos/Obj_cmake" \ +-D TPL_ENABLE_MPI:BOOL=ON \ +-D CMAKE_C_FLAGS:STRING="-m64 -g" \ +-D CMAKE_CXX_FLAGS:STRING="-m64 -g" \ +-D CMAKE_Fortran_FLAGS:STRING="-m64 -g" \ +-D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \ +-D MPI_EXEC_MAX_NUMPROCS:STRING=11 \ +-D Trilinos_ENABLE_ALL_PACKAGES:BOOL=OFF \ +-D Trilinos_ENABLE_EXAMPLES:BOOL=ON \ +-D Trilinos_VERBOSE_CONFIGURE:BOOL=ON \ +-D Trilinos_ENABLE_Zoltan:BOOL=ON \ +-D Zoltan_ENABLE_EXAMPLES:BOOL=ON \ +-D Zoltan_ENABLE_TESTS:BOOL=ON \ +-D Zoltan_ENABLE_ParMETIS:BOOL=ON \ +-D ParMETIS_INCLUDE_DIRS:FILEPATH="/Net/local/proj/zoltan/arch/all/src/ParMETIS3_1" \ +-D ParMETIS_LIBRARY_DIRS:FILEPATH="/Net/local/proj/zoltan/arch/linux64/lib/openmpi/ParMETIS3_1" \ +-D Zoltan_ENABLE_Scotch:BOOL=ON \ +-D Scotch_INCLUDE_DIRS:FILEPATH="/Net/local/proj/zoltan/arch/all/src/Scotch5" \ +-D Scotch_LIBRARY_DIRS:FILEPATH="/Net/local/proj/zoltan/arch/linux64/lib/openmpi/Scotch5" \ +-D Zoltan_ENABLE_PaToH:BOOL=ON \ +-D PaToH_LIBRARY_DIRS:FILEPATH="/Net/local/proj/zoltan/arch/linux64/lib" \ +-D PaToH_INCLUDE_DIRS:FILEPATH="/Net/local/proj/zoltan/arch/linux64/PaToH" \ +.. |& tee OUTPUT.CMAKE + +make |& tee OUTPUT.MAKE +make install |& tee OUTPUT.INSTALL + diff --git a/Zoltan-3.90/SampleCmakeScripts/linux_zoltan_only_purify b/Zoltan-3.90/SampleCmakeScripts/linux_zoltan_only_purify new file mode 100755 index 00000000..c7860a6b --- /dev/null +++ b/Zoltan-3.90/SampleCmakeScripts/linux_zoltan_only_purify @@ -0,0 +1,42 @@ +#!/bin/csh +# Sample script for building using CMAKE, LAM, and purify on octopi. +# 64-bit, Zoltan only. +# Run in Trilinos/Obj_pure. + +# Clean up the mess from previous configurations. 
+/bin/rm -r cmake* CMake* CPack* CTest* Dart* Trilinos* Testing packages Makefile OUTPUT* include lib install* +set MPICCEXTRACOMP="`/opt/lam714-gcc346-pure/bin/mpicc --showme:compile`" +set MPICXXEXTRACOMP="`/opt/lam714-gcc346-pure/bin/mpiCC --showme:compile`" +set MPIEXTRALINK="`/opt/lam714-gcc346-pure/bin/mpiCC --showme:link`" +set PURIFY="/usr/local/rational/releases/PurifyPlus.7.0/i386_linux2/bin/purify" +set GCC="/usr/bin/gcc346" +set GCXX="/usr/bin/g++346" + +/home/kddevin/cmake/bin/cmake \ +-D CMAKE_INSTALL_PREFIX:FILEPATH="/Net/local/homes/kddevin/code/Trilinos/Obj_pure" \ +-D TPL_ENABLE_MPI:BOOL=ON \ +-D MPI_USE_COMPILER_WRAPPERS:BOOL=OFF \ +-D MPI_BIN_DIR:STRING="/opt/lam714-gcc346-pure/bin" \ +-D CMAKE_C_COMPILER:STRING="$PURIFY" \ +-D CMAKE_C_FLAGS:STRING="-best-effort -follow-child-processes=yes -cache-dir=/tmp/purify -chain-length=20 $GCC -m64 -g $MPICCEXTRACOMP" \ +-D CMAKE_CXX_COMPILER:STRING="$PURIFY" \ +-D CMAKE_CXX_FLAGS:STRING="-best-effort -follow-child-processes=yes -cache-dir=/tmp/purify -chain-length=20 $GCXX -m64 -g $MPICXXEXTRACOMP" \ +-D Trilinos_EXTRA_LINK_FLAGS:STRING="$MPIEXTRALINK" \ +-D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \ +-D MPI_EXEC_MAX_NUMPROCS:STRING=11 \ +-D Trilinos_ENABLE_Fortran:BOOL=OFF \ +-D Trilinos_ENABLE_ALL_PACKAGES:BOOL=OFF \ +-D Trilinos_ENABLE_EXAMPLES:BOOL=ON \ +-D Trilinos_VERBOSE_CONFIGURE:BOOL=ON \ +-D Trilinos_ENABLE_Zoltan:BOOL=ON \ +-D Zoltan_ENABLE_EXAMPLES:BOOL=OFF \ +-D Zoltan_ENABLE_TESTS:BOOL=ON \ +-D Zoltan_ENABLE_ParMETIS:BOOL=ON \ +-D ParMETIS_LIBRARY_DIRS:FILEPATH="/Net/local/proj/zoltan/arch/linux64/lib/lam/ParMETIS3" \ +-D ParMETIS_INCLUDE_DIRS:FILEPATH="/Net/local/proj/zoltan/arch/all/src/ParMETIS3" \ +-D Zoltan_ENABLE_Scotch:BOOL=ON \ +-D Scotch_LIBRARY_DIRS:FILEPATH="/Net/local/proj/zoltan/arch/linux64/lib/lam/Scotch5" \ +-D Scotch_INCLUDE_DIRS:FILEPATH="/Net/local/proj/zoltan/arch/all/src/Scotch5" \ +.. |& tee OUTPUT.CMAKE + +make |& tee OUTPUT.MAKE diff --git a/Zoltan-3.90/SampleCmakeScripts/linux_zoltan_only_serial b/Zoltan-3.90/SampleCmakeScripts/linux_zoltan_only_serial new file mode 100755 index 00000000..8b7a43fc --- /dev/null +++ b/Zoltan-3.90/SampleCmakeScripts/linux_zoltan_only_serial @@ -0,0 +1,29 @@ +#!/bin/csh +# Sample script for building in serial mode using CMAKE on linux machine godel. +# 64-bit, Zoltan only. +# Run in Trilinos/Obj_cmake. + +# Clean up the mess from previous configurations. +/bin/rm -r cmake* CMake* CPack* CTest* Dart* Trilinos* Testing packages Makefile + +cmake \ +-D CMAKE_INSTALL_PREFIX:FILEPATH="/home/kddevin/code/Trilinos/Obj_cmake_serial" \ +-D TPL_ENABLE_MPI:BOOL=OFF \ +-D CMAKE_C_FLAGS:STRING="-m64 -g" \ +-D CMAKE_CXX_FLAGS:STRING="-m64 -g" \ +-D CMAKE_Fortran_FLAGS:STRING="-m64 -g" \ +-D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \ +-D Trilinos_ENABLE_ALL_PACKAGES:BOOL=OFF \ +-D Trilinos_ENABLE_EXAMPLES:BOOL=ON \ +-D Trilinos_VERBOSE_CONFIGURE:BOOL=ON \ +-D Trilinos_ENABLE_Zoltan:BOOL=ON \ +-D Trilinos_EXTRA_LINK_FLAGS:STRING="-lsimpi" \ +-D Zoltan_ENABLE_EXAMPLES:BOOL=ON \ +-D Zoltan_ENABLE_TESTS:BOOL=ON \ +-D Zoltan_ENABLE_ParMETIS:BOOL=ON \ +-D ParMETIS_INCLUDE_DIRS:FILEPATH="/home/kddevin/code/ParMETIS3_1_siMPI" \ +-D ParMETIS_LIBRARY_DIRS:FILEPATH="/home/kddevin/code/ParMETIS3_1_siMPI" \ +.. 
|& tee OUTPUT.CMAKE + +make |& tee OUTPUT.MAKE +make install |& tee OUTPUT.INSTALL diff --git a/Zoltan-3.90/SampleCmakeScripts/mac_osX_zoltan_only b/Zoltan-3.90/SampleCmakeScripts/mac_osX_zoltan_only new file mode 100755 index 00000000..238deb72 --- /dev/null +++ b/Zoltan-3.90/SampleCmakeScripts/mac_osX_zoltan_only @@ -0,0 +1,57 @@ +#! +# Sample script for building using CMAKE on Karen's Mac OS X system. +# 64-bit, Zoltan only. +# TPLs = ParMETIS, Scotch and PaToH. +# Run in Trilinos/Obj_cmake. + +# Clean up the mess from previous configurations. +/bin/rm -r cmake* CMake* CPack* CTest* Dart* Trilinos* Testing packages Makefile + +setenv F77 /Users/kddevin/code/lib-m64/lam-7.1.4/bin/mpif77 + +cmake \ +-D CMAKE_INSTALL_PREFIX:FILEPATH="/Users/kddevin/code/trilinos/Obj_cmake" \ +-D TPL_ENABLE_MPI:BOOL=ON \ +-D CMAKE_C_FLAGS:STRING="-m64 -g" \ +-D CMAKE_CXX_FLAGS:STRING="-m64 -g" \ +-D CMAKE_Fortran_FLAGS:STRING="-m64 -g" \ +-D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \ +-D MPI_EXEC_MAX_NUMPROCS:STRING=11 \ +-D Trilinos_ENABLE_ALL_PACKAGES:BOOL=OFF \ +-D Trilinos_ENABLE_EXAMPLES:BOOL=ON \ +-D Trilinos_VERBOSE_CONFIGURE:BOOL=ON \ +-D Trilinos_ENABLE_Zoltan:BOOL=ON \ +-D Trilinos_EXTRA_LINK_FLAGS:STRING="-framework vecLib" \ +-D Zoltan_ENABLE_EXAMPLES:BOOL=ON \ +-D Zoltan_ENABLE_TESTS:BOOL=ON \ +-D Zoltan_ENABLE_ParMETIS:BOOL=ON \ +-D ParMETIS_INCLUDE_DIRS:FILEPATH="/Users/kddevin/code/lib-m64/ParMETIS3_1" \ +-D ParMETIS_LIBRARY_DIRS:FILEPATH="/Users/kddevin/code/lib-m64/ParMETIS3_1" \ +-D Zoltan_ENABLE_Scotch:BOOL=ON \ +-D Scotch_INCLUDE_DIRS:FILEPATH="/Users/kddevin/code/lib-m64/scotch_5.1/include" \ +-D Scotch_LIBRARY_DIRS:FILEPATH="/Users/kddevin/code/lib-m64/scotch_5.1/lib" \ +-D Zoltan_ENABLE_PaToH:BOOL=ON \ +-D PaToH_INCLUDE_DIRS:FILEPATH="/Users/kddevin/code/lib-m64/PaToH/dist" \ +-D PaToH_LIBRARY_DIRS:FILEPATH="/Users/kddevin/code/lib-m64/PaToH/dist" \ +.. |& tee OUTPUT.CMAKE + +make |& tee OUTPUT.MAKE +make install |& tee OUTPUT.INSTALL + +#Other useful flags. +#-D Trilinos_ENABLE_Epetra:BOOL=ON \ +#-D Trilinos_ENABLE_Tpetra:BOOL=ON \ +#-D Trilinos_ENABLE_Isorropia:BOOL=ON \ +#-D Trilinos_ENABLE_EpetraExt:BOOL=ON \ +#-D Trilinos_ENABLE_Fortran:BOOL=OFF \ +#-D MPI_EXTRA_LIBRARY:FILEPATH="" \ +#-D CMAKE_CXX_COMPILER:FILEPATH="mpic++" \ +#-D CMAKE_C_COMPILER:FILEPATH="mpicc" \ + +# NOTE: I hacked my version of CMAKE, as it had errors trying to find +# install_name_tool. I changed the following file to check for +# CMAKE_INSTALL_NAME_TOOL before running FIND_PROGRAM to find it. +# I don't know why it had problems only with install_name_tool, as the +# linker, ranlib, etc., were found with no difficulty. +# /Applications/CMake\ 2.6-2.app/Contents/share/cmake-2.6/Modules/CMakeFindBinUtils.cmake + diff --git a/Zoltan-3.90/SampleConfigurationScripts/linux_trilinos_runtests b/Zoltan-3.90/SampleConfigurationScripts/linux_trilinos_runtests new file mode 100755 index 00000000..8f536590 --- /dev/null +++ b/Zoltan-3.90/SampleConfigurationScripts/linux_trilinos_runtests @@ -0,0 +1,5 @@ +#!/bin/tcsh +which lamboot +lamboot +make runtests-mpi TRILINOS_MPI_GO="'mpirun -np '" + diff --git a/Zoltan-3.90/SampleConfigurationScripts/linux_with_purify_zoltan_only b/Zoltan-3.90/SampleConfigurationScripts/linux_with_purify_zoltan_only new file mode 100755 index 00000000..470207bf --- /dev/null +++ b/Zoltan-3.90/SampleConfigurationScripts/linux_with_purify_zoltan_only @@ -0,0 +1,34 @@ +# Script for building Zoltan with purify on linux workstation octopi +# with openmpi. +# This is a 64-bit installation. 
+ +/bin/rm -r Makefile Makefile.export.zoltan OUTPUT.C OUTPUT.I OUTPUT.M config.cache config.log config.status* example/ include/ lib/ siMPI/ src/ + +set path=(/usr/local/bin $path) +setenv PURE_CC "purify -best-effort -follow-child-processes=yes -cache-dir=/tmp/purify -chain-length=20" +setenv CC "$PURE_CC gcc34 -Wall -Werror-implicit-function-declaration" +setenv CXX "$PURE_CC g++34 -Wall -Werror-implicit-function-declaration" +setenv CFLAGS "-m64 -g" +setenv CXXFLAGS "-m64 -g" +setenv CPPFLAGS "-m64 -g" +setenv FCFLAGS "-m64 -g" +setenv MPIINC `mpicc --showme:compile` +setenv MPILIB `mpicc --showme:link` + +../configure -C \ + --prefix=/Net/local/homes/kddevin/code/zoltan_v3.2/Obj_pure \ + --enable-mpi \ + --with-mpi-compilers=no \ + --disable-fortran \ + --with-gnumake \ + --disable-zoltan-cppdriver \ + --with-libs="$MPILIB" \ + --with-incdirs="$MPIINC" \ + |& tee OUTPUT.C +make everything |& tee OUTPUT.M +make install |& tee OUTPUT.I + +# --with-parmetis \ +# --with-parmetis-incdir="/Net/local/proj/zoltan/arch/all/src/ParMETIS3" \ +# --with-parmetis-libdir="/Net/local/proj/zoltan/arch/linux64/lib/openmpi/ParMETIS3_1" \ + diff --git a/Zoltan-3.90/SampleConfigurationScripts/linux_zoltan_dist b/Zoltan-3.90/SampleConfigurationScripts/linux_zoltan_dist new file mode 100755 index 00000000..97085df7 --- /dev/null +++ b/Zoltan-3.90/SampleConfigurationScripts/linux_zoltan_dist @@ -0,0 +1,15 @@ +#Script for building the Zoltan tarball on a 64-bit Linux workstation +#with OpenMPI. +#This script builds the Zoltan tarball from the Zoltan package directory. +#Assuming running script from +#/Net/local/homes/kddevin/code/zoltan_v3.1/Obj_DIST. +set path=(/usr/local/bin $path) +setenv CFLAGS -m64 +setenv CXXFLAGS -m64 +setenv CPPFLAGS -m64 +setenv FCFLAGS -m64 + +../configure -C \ + --prefix=/Net/local/homes/kddevin/code/zoltan_v3.1/OBJ_DIST \ + --with-gnumake |& tee OUTPUT.C +make dist |& tee OUTPUT.D diff --git a/Zoltan-3.90/SampleConfigurationScripts/linux_zoltan_only b/Zoltan-3.90/SampleConfigurationScripts/linux_zoltan_only new file mode 100755 index 00000000..62c238b3 --- /dev/null +++ b/Zoltan-3.90/SampleConfigurationScripts/linux_zoltan_only @@ -0,0 +1,22 @@ +#Script for building Zoltan only on a 64-bit Linux workstation +#with OpenMPI. +#This script builds only Zoltan from the Zoltan package directory. +#Assuming running script from +#/Net/local/homes/kddevin/code/zoltan_v3.1/Obj_linux64. +set path=(/usr/local/bin $path) +setenv CFLAGS -m64 +setenv CXXFLAGS -m64 +setenv CPPFLAGS -m64 +setenv FCFLAGS -m64 + +../configure -C \ + --prefix=/Net/local/homes/kddevin/code/zoltan_v3.1/Obj_linux64 \ + --with-parmetis \ + --with-parmetis-incdir="/Net/local/proj/zoltan/arch/all/src/ParMETIS3_1" \ + --with-parmetis-libdir="/Net/local/proj/zoltan/arch/linux64/lib/openmpi/ParMETIS3_1" \ + --with-scotch \ + --with-scotch-incdir="/Net/local/proj/zoltan/arch/all/src/Scotch5" \ + --with-scotch-libdir="/Net/local/proj/zoltan/arch/linux64/lib/openmpi/Scotch5" \ + --with-gnumake |& tee OUTPUT.C +make everything |& tee OUTPUT.M +make install |& tee OUTPUT.I diff --git a/Zoltan-3.90/SampleConfigurationScripts/mac_osX_no_fortran b/Zoltan-3.90/SampleConfigurationScripts/mac_osX_no_fortran new file mode 100755 index 00000000..88673045 --- /dev/null +++ b/Zoltan-3.90/SampleConfigurationScripts/mac_osX_no_fortran @@ -0,0 +1,25 @@ +#Script for building Zoltan and Isorropia on a 64-bit Mac OS X +#with LAM MPI on a machine with NO FORTRAN COMPILER. Note the option +#--disable-fortran. 
+#This script builds only Zoltan but builds it from the top-level Trilinos +#directory. +#Assuming running script from /Users/kddevin/code/trilinos_v9.0/Obj_mac64. +set path=(/Users/kddevin/code/lib-m64/lam-7.1.4/bin $path) +setenv CC /Users/kddevin/code/lib-m64/lam-7.1.4/bin/mpicc +setenv CXX /Users/kddevin/code/lib-m64/lam-7.1.4/bin/mpic++ +setenv FC /Users/kddevin/code/lib-m64/lam-7.1.4/bin/mpif77 +setenv CFLAGS -m64 +setenv CXXFLAGS -m64 +setenv CPPFLAGS -m64 +setenv FCFLAGS -m64 + +../configure -C \ + --prefix=/Users/kddevin/code/trilinos_v9.0/Obj_mac64 \ + --with-parmetis \ + --with-parmetis-incdir="/Users/kddevin/code/lib-m64/ParMETIS3_1" \ + --with-parmetis-libdir="/Users/kddevin/code/lib-m64/ParMETIS3_1" \ + --with-libs="-framework vecLib" \ + --with-gnumake \ + --with-cxxflags="-DLAM_BUILDING" |& tee OUTPUT.C +make everything |& tee OUTPUT.M +make install |& tee OUTPUT.I diff --git a/Zoltan-3.90/SampleConfigurationScripts/mac_osX_zoltan_dist b/Zoltan-3.90/SampleConfigurationScripts/mac_osX_zoltan_dist new file mode 100755 index 00000000..2eb62edf --- /dev/null +++ b/Zoltan-3.90/SampleConfigurationScripts/mac_osX_zoltan_dist @@ -0,0 +1,4 @@ +# Since Mac OS X is case-insensitive, autoconf produces a bad Makefile.in +# Don't use Mac OS X until this bug is fixed. + +echo "Due to bug in autoconf Makefile.in, do not make dist on a case-insensitive system." diff --git a/Zoltan-3.90/SampleConfigurationScripts/mac_osX_zoltan_only b/Zoltan-3.90/SampleConfigurationScripts/mac_osX_zoltan_only new file mode 100755 index 00000000..63e27a0d --- /dev/null +++ b/Zoltan-3.90/SampleConfigurationScripts/mac_osX_zoltan_only @@ -0,0 +1,18 @@ +#Script for building Zoltan only on a 64-bit Mac OS X with LAM mpi +#Assuming running this script from /Users/kddevin/code/zoltan_v3.1/OBJ_MAC64. + +setenv CFLAGS -m64 +setenv CXXFLAGS -m64 +../configure \ + --prefix=/Users/kddevin/code/zoltan_v3.1/OBJ_MAC64 \ + --with-gnumake \ + --with-parmetis \ + --with-parmetis-incdir="/Users/kddevin/code/lib-m64/ParMETIS3_1" \ + --with-parmetis-libdir="/Users/kddevin/code/lib-m64/ParMETIS3_1" \ + --with-patoh \ + --with-patoh-incdir="/Users/kddevin/code/lib-m64/PaToH/dist" \ + --with-patoh-libdir="/Users/kddevin/code/lib-m64/PaToH/dist" \ + |& tee OUTPUT.C +make everything |& tee OUTPUT.M +make install |& tee OUTPUT.I + diff --git a/Zoltan-3.90/VERSION b/Zoltan-3.90/VERSION new file mode 100644 index 00000000..0ddaa6f5 --- /dev/null +++ b/Zoltan-3.90/VERSION @@ -0,0 +1,2 @@ +To determine the exact version number, type + grep ZOLTAN_VERSION_NUMBER src/include/zoltan.h diff --git a/Zoltan-3.90/bootstrap-local b/Zoltan-3.90/bootstrap-local new file mode 100755 index 00000000..50ffdafa --- /dev/null +++ b/Zoltan-3.90/bootstrap-local @@ -0,0 +1,3 @@ +#! 
/bin/csh +setenv AUTOHEADER 'echo SkippingAutoHeader' +autoreconf -i -f -v diff --git a/Zoltan-3.90/cmake/Dependencies.cmake b/Zoltan-3.90/cmake/Dependencies.cmake new file mode 100644 index 00000000..5ffe0b40 --- /dev/null +++ b/Zoltan-3.90/cmake/Dependencies.cmake @@ -0,0 +1,8 @@ +SET(LIB_REQUIRED_DEP_PACKAGES) +SET(LIB_OPTIONAL_DEP_PACKAGES) +SET(TEST_REQUIRED_DEP_PACKAGES) +SET(TEST_OPTIONAL_DEP_PACKAGES) +SET(LIB_REQUIRED_DEP_TPLS) +SET(LIB_OPTIONAL_DEP_TPLS MPI METIS ParMETIS PaToH Scotch Zlib CCOLAMD OVIS) +SET(TEST_REQUIRED_DEP_TPLS) +SET(TEST_OPTIONAL_DEP_TPLS) diff --git a/Zoltan-3.90/cmake/Zoltan_config.h.in b/Zoltan-3.90/cmake/Zoltan_config.h.in new file mode 100644 index 00000000..d53dfd69 --- /dev/null +++ b/Zoltan-3.90/cmake/Zoltan_config.h.in @@ -0,0 +1,74 @@ +/* src/include/Zoltan_config.h.in. Generated from configure.ac by autoheader. */ + +/* KDD Copied F77 macros from packages/epetra/cmake/Epetra_config.h.in. */ +/* Define to dummy `main' function (if any) required to link to the Fortran + libraries. */ +#cmakedefine F77_DUMMY_MAIN + +/* Define to a macro mangling the given C identifier (in lower and upper + case), which must not contain underscores, for linking with Fortran. */ +#ifndef FC_FUNC + #define FC_FUNC@F77_FUNC@ +#endif + +/* As FC_FUNC, but for C identifiers containing underscores. */ +#ifndef FC_FUNC_ + #define FC_FUNC_@F77_FUNC_@ +#endif + +/* Define if F77 and FC dummy `main' functions are identical. */ +#cmakedefine FC_DUMMY_MAIN_EQ_F77 + +/* ZOLTAN_ID_TYPE is unsigned int */ +#cmakedefine UNSIGNED_INT_GLOBAL_IDS + +/* ZOLTAN_ID_TYPE is unsigned long */ +#cmakedefine UNSIGNED_LONG_GLOBAL_IDS + +/* ZOLTAN_ID_TYPE is unsigned long long */ +#cmakedefine UNSIGNED_LONG_LONG_GLOBAL_IDS + +/* define if we want to use MPI */ +#cmakedefine HAVE_MPI + +/* Define if want to build with nemesis_exodus enabled */ +#cmakedefine HAVE_NEMESIS_EXODUS + +/* Define if want to build with parmetis enabled */ +#cmakedefine HAVE_METIS + +/* Define if want to build with parmetis enabled */ +#cmakedefine HAVE_PARMETIS + +/* Define if want to build with patoh enabled */ +#cmakedefine HAVE_PATOH + +/* Define if want to build with scotch enabled */ +#cmakedefine HAVE_SCOTCH + +/* Define if want to build with OVIS enabled */ +#cmakedefine HAVE_OVIS + +/* Define if want to build with OVIS enabled */ +#cmakedefine HAVE_PURIFY + +/* Define if DON'T want support for MPI TPL */ +#ifndef HAVE_MPI +#define NO_MPI_TPL +#endif + +/* Define if want to build with zlib enabled */ +#cmakedefine ZHAVE_GZIP + +/* Use to have only filename when debugging memory */ +#define SHORT_FILE + +/* HUND support */ +#cmakedefine HAVE_ZOLTAN_HUND + +/* Revert to Old Hash function support */ +#cmakedefine HAVE_ZOLTAN_KNUTH_HASH + +#ifdef HAVE_ZOLTAN_HUND +#define CEDRIC_2D_PARTITIONS +#endif diff --git a/Zoltan-3.90/config/ax_f90_module_case.m4 b/Zoltan-3.90/config/ax_f90_module_case.m4 new file mode 100644 index 00000000..7f16c888 --- /dev/null +++ b/Zoltan-3.90/config/ax_f90_module_case.m4 @@ -0,0 +1,50 @@ +dnl Check case (upper or lower) of F90 module files. +dnl Also checks module suffix, but we return only ax_cv_f90_modulecase. 
+AC_DEFUN([AX_F90_MODULE_CASE],[ +AC_CACHE_CHECK([fortran 90 module file suffix and case], +ax_cv_f90_modulecase, +[ +rm -f conftest* +cat >conftest.f < conftest.out 2>&1 ; then + FCMODSUFFIX=`ls conftest* | grep -v conftest.f | grep -v conftest.o` + echo "KDDKDD CASE 2" ${FCMODSUFFIX} + FCMODSUFFIX=`echo "${FCMODSUFFIX}" | sed -e 's/conftest\.//g'` + if test -z "${FCMODSUFFIX}" ; then + FCMODSUFFIX=`ls CONFTEST* 2>/dev/null \ + | grep -v CONFTEST.f | grep -v CONFTEST.o` + FCMODSUFFIX=`echo "${FCMODSUFFIX}" | sed -e 's/CONFTEST\.//g'` + if test -n "${FCMODSUFFIX}" ; then + testname="CONFTEST" + modcase="upper" + fi + fi + if test -z "${FCMODSUFFIX}" ; then + AC_MSG_RESULT(unknown) + # Use mod if we can't figure it out + FCMODSUFFIX="mod" + else + AC_MSG_RESULT(${FCMODSUFFIX}) + fi +else + AC_MSG_RESULT(unknown) +fi +#AC_SUBST(FCMODSUFFIX) +AC_MSG_CHECKING(for case of module names) +if test "${modcase}" = "lower" ; then + AC_MSG_RESULT(lower) + ax_cv_f90_modulecase="lower" +else + AC_MSG_RESULT(upper) + ax_cv_f90_modulecase="upper" +fi +])]) diff --git a/Zoltan-3.90/config/ax_f90_module_flag.m4 b/Zoltan-3.90/config/ax_f90_module_flag.m4 new file mode 100644 index 00000000..fe86ba47 --- /dev/null +++ b/Zoltan-3.90/config/ax_f90_module_flag.m4 @@ -0,0 +1,67 @@ +# =========================================================================== +# http://www.nongnu.org/autoconf-archive/ax_f90_module_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_F90_MODULE_FLAG +# +# DESCRIPTION +# +# Find Fortran 90 modules inclusion flag. The module inclusion flag is +# stored in the cached variable ax_f90_modflag. An error is triggered if +# the flag cannot be found. Supported are the -I GNU compilers flag, the +# -M SUN compilers flag, and the -p Absoft Pro Fortran compiler flag. +# +# LICENSE +# +# Copyright (c) 2009 Luc Maisonobe +# Copyright (c) 2009 Julian C. Cummings +# Copyright (c) 2009 Alexander Pletzer +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. + +AC_DEFUN([AX_F90_MODULE_FLAG],[ +AC_CACHE_CHECK([fortran 90 modules inclusion flag], +ax_cv_f90_modflag, +[AC_LANG_PUSH(Fortran) +i=0 +while test \( -f tmpdir_$i \) -o \( -d tmpdir_$i \) ; do + i=`expr $i + 1` +done +mkdir tmpdir_$i +cd tmpdir_$i +AC_COMPILE_IFELSE([ +!234567 + module conftest_module + contains + subroutine conftest_routine + write(*,'(a)') 'gotcha!' + end subroutine conftest_routine + end module conftest_module + ],[],[]) +cd .. +ax_cv_f90_modflag="not found" +for ax_flag in "-I " "-M" "-p"; do + if test "$ax_cv_f90_modflag" = "not found" ; then + ax_save_FCFLAGS="$FCFLAGS" + FCFLAGS="$ax_save_FCFLAGS ${ax_flag}tmpdir_$i" + AC_COMPILE_IFELSE([ +!234567 + program conftest_program + use conftest_module + call conftest_routine + end program conftest_program + ],[ax_cv_f90_modflag="$ax_flag"],[]) + FCFLAGS="$ax_save_FCFLAGS" + fi +done +rm -fr tmpdir_$i +if test "$ax_cv_f90_modflag" = "not found" ; then + AC_MSG_ERROR([unable to find compiler flag for modules inclusion]) +fi +AC_LANG_POP(Fortran) +])]) + diff --git a/Zoltan-3.90/config/compile b/Zoltan-3.90/config/compile new file mode 100755 index 00000000..b1f47491 --- /dev/null +++ b/Zoltan-3.90/config/compile @@ -0,0 +1,310 @@ +#! /bin/sh +# Wrapper for compilers which do not understand '-c -o'. 
+ +scriptversion=2012-01-04.17; # UTC + +# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2009, 2010, 2012 Free +# Software Foundation, Inc. +# Written by Tom Tromey . +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# This file is maintained in Automake, please report +# bugs to or send patches to +# . + +nl=' +' + +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent tools from complaining about whitespace usage. +IFS=" "" $nl" + +file_conv= + +# func_file_conv build_file lazy +# Convert a $build file to $host form and store it in $file +# Currently only supports Windows hosts. If the determined conversion +# type is listed in (the comma separated) LAZY, no conversion will +# take place. +func_file_conv () +{ + file=$1 + case $file in + / | /[!/]*) # absolute file, and not a UNC file + if test -z "$file_conv"; then + # lazily determine how to convert abs files + case `uname -s` in + MINGW*) + file_conv=mingw + ;; + CYGWIN*) + file_conv=cygwin + ;; + *) + file_conv=wine + ;; + esac + fi + case $file_conv/,$2, in + *,$file_conv,*) + ;; + mingw/*) + file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` + ;; + cygwin/*) + file=`cygpath -m "$file" || echo "$file"` + ;; + wine/*) + file=`winepath -w "$file" || echo "$file"` + ;; + esac + ;; + esac +} + +# func_cl_wrapper cl arg... +# Adjust compile command to suit cl +func_cl_wrapper () +{ + # Assume a capable shell + lib_path= + shared=: + linker_opts= + for arg + do + if test -n "$eat"; then + eat= + else + case $1 in + -o) + # configure might choose to run compile as 'compile cc -o foo foo.c'. 
+ eat=1 + case $2 in + *.o | *.[oO][bB][jJ]) + func_file_conv "$2" + set x "$@" -Fo"$file" + shift + ;; + *) + func_file_conv "$2" + set x "$@" -Fe"$file" + shift + ;; + esac + ;; + -I*) + func_file_conv "${1#-I}" mingw + set x "$@" -I"$file" + shift + ;; + -l*) + lib=${1#-l} + found=no + save_IFS=$IFS + IFS=';' + for dir in $lib_path $LIB + do + IFS=$save_IFS + if $shared && test -f "$dir/$lib.dll.lib"; then + found=yes + set x "$@" "$dir/$lib.dll.lib" + break + fi + if test -f "$dir/$lib.lib"; then + found=yes + set x "$@" "$dir/$lib.lib" + break + fi + done + IFS=$save_IFS + + test "$found" != yes && set x "$@" "$lib.lib" + shift + ;; + -L*) + func_file_conv "${1#-L}" + if test -z "$lib_path"; then + lib_path=$file + else + lib_path="$lib_path;$file" + fi + linker_opts="$linker_opts -LIBPATH:$file" + ;; + -static) + shared=false + ;; + -Wl,*) + arg=${1#-Wl,} + save_ifs="$IFS"; IFS=',' + for flag in $arg; do + IFS="$save_ifs" + linker_opts="$linker_opts $flag" + done + IFS="$save_ifs" + ;; + -Xlinker) + eat=1 + linker_opts="$linker_opts $2" + ;; + -*) + set x "$@" "$1" + shift + ;; + *.cc | *.CC | *.cxx | *.CXX | *.[cC]++) + func_file_conv "$1" + set x "$@" -Tp"$file" + shift + ;; + *.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO]) + func_file_conv "$1" mingw + set x "$@" "$file" + shift + ;; + *) + set x "$@" "$1" + shift + ;; + esac + fi + shift + done + if test -n "$linker_opts"; then + linker_opts="-link$linker_opts" + fi + exec "$@" $linker_opts + exit 1 +} + +eat= + +case $1 in + '') + echo "$0: No command. Try '$0 --help' for more information." 1>&2 + exit 1; + ;; + -h | --h*) + cat <<\EOF +Usage: compile [--help] [--version] PROGRAM [ARGS] + +Wrapper for compilers which do not understand '-c -o'. +Remove '-o dest.o' from ARGS, run PROGRAM with the remaining +arguments, and rename the output as expected. + +If you are trying to build a whole package this is not the +right script to run: please start by reading the file 'INSTALL'. + +Report bugs to . +EOF + exit $? + ;; + -v | --v*) + echo "compile $scriptversion" + exit $? + ;; + cl | *[/\\]cl | cl.exe | *[/\\]cl.exe ) + func_cl_wrapper "$@" # Doesn't return... + ;; +esac + +ofile= +cfile= + +for arg +do + if test -n "$eat"; then + eat= + else + case $1 in + -o) + # configure might choose to run compile as 'compile cc -o foo foo.c'. + # So we strip '-o arg' only if arg is an object. + eat=1 + case $2 in + *.o | *.obj) + ofile=$2 + ;; + *) + set x "$@" -o "$2" + shift + ;; + esac + ;; + *.c) + cfile=$1 + set x "$@" "$1" + shift + ;; + *) + set x "$@" "$1" + shift + ;; + esac + fi + shift +done + +if test -z "$ofile" || test -z "$cfile"; then + # If no '-o' option was seen then we might have been invoked from a + # pattern rule where we don't need one. That is ok -- this is a + # normal compilation that the losing compiler can handle. If no + # '.c' file was seen then we are probably linking. That is also + # ok. + exec "$@" +fi + +# Name of file we expect compiler to create. +cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'` + +# Create the lock directory. +# Note: use '[/\\:.-]' here to ensure that we don't use the same name +# that we are using for the .o file. Also, base the name on the expected +# object file name, since that is what matters with a parallel build. +lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d +while true; do + if mkdir "$lockdir" >/dev/null 2>&1; then + break + fi + sleep 1 +done +# FIXME: race condition here if user kills between mkdir and trap. 
+trap "rmdir '$lockdir'; exit 1" 1 2 15 + +# Run the compile. +"$@" +ret=$? + +if test -f "$cofile"; then + test "$cofile" = "$ofile" || mv "$cofile" "$ofile" +elif test -f "${cofile}bj"; then + test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile" +fi + +rmdir "$lockdir" +exit $ret + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/Zoltan-3.90/config/config.guess b/Zoltan-3.90/config/config.guess new file mode 100755 index 00000000..49ba16f1 --- /dev/null +++ b/Zoltan-3.90/config/config.guess @@ -0,0 +1,1522 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, +# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +# 2011, 2012 Free Software Foundation, Inc. + +timestamp='2012-01-01' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA +# 02110-1301, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + + +# Originally written by Per Bothner. Please send patches (context +# diff format) to and include a ChangeLog +# entry. +# +# This script attempts to guess a canonical system name similar to +# config.sub. If it succeeds, it prints the system name on stdout, and +# exits with 0. Otherwise, it exits with 1. +# +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, +2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 +Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." 
+ +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +trap 'exit 1' 1 2 15 + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. + +set_cc_for_build=' +trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; +trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; +: ${TMPDIR=/tmp} ; + { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; +dummy=$tmp/dummy ; +tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; +case $CC_FOR_BUILD,$HOST_CC,$CC in + ,,) echo "int x;" > $dummy.c ; + for c in cc gcc c89 c99 ; do + if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then + CC_FOR_BUILD="$c"; break ; + fi ; + done ; + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found ; + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; +esac ; set_cc_for_build= ;' + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. +# (ghazi@noc.rutgers.edu 1994-08-24) +if (test -f /.attbin/uname) >/dev/null 2>&1 ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +# Note: order is significant - the case branches are not exclusive. + +case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". 
+ sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ + /usr/sbin/$sysctl 2>/dev/null || echo unknown)` + case "${UNAME_MACHINE_ARCH}" in + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + sh5el) machine=sh5le-unknown ;; + *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently, or will in the future. + case "${UNAME_MACHINE_ARCH}" in + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + eval $set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ELF__ + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "${UNAME_VERSION}" in + Debian*) + release='-gnu' + ;; + *) + release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. + echo "${machine}-${os}${release}" + exit ;; + *:OpenBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} + exit ;; + *:ekkoBSD:*:*) + echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + exit ;; + *:SolidBSD:*:*) + echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} + exit ;; + macppc:MirBSD:*:*) + echo powerpc-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:MirBSD:*:*) + echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + exit ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. + ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE="alpha" ;; + "EV4.5 (21064)") + UNAME_MACHINE="alpha" ;; + "LCA4 (21066/21068)") + UNAME_MACHINE="alpha" ;; + "EV5 (21164)") + UNAME_MACHINE="alphaev5" ;; + "EV5.6 (21164A)") + UNAME_MACHINE="alphaev56" ;; + "EV5.6 (21164PC)") + UNAME_MACHINE="alphapca56" ;; + "EV5.7 (21164PC)") + UNAME_MACHINE="alphapca57" ;; + "EV6 (21264)") + UNAME_MACHINE="alphaev6" ;; + "EV6.7 (21264A)") + UNAME_MACHINE="alphaev67" ;; + "EV6.8CB (21264C)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8AL (21264B)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8CX (21264D)") + UNAME_MACHINE="alphaev68" ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE="alphaev69" ;; + "EV7 (21364)") + UNAME_MACHINE="alphaev7" ;; + "EV7.9 (21364A)") + UNAME_MACHINE="alphaev79" ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. 
+ echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + exitcode=$? + trap '' 0 + exit $exitcode ;; + Alpha\ *:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # Should we change UNAME_MACHINE based on the output of uname instead + # of the specific Alpha model? + echo alpha-pc-interix + exit ;; + 21064:Windows_NT:50:3) + echo alpha-dec-winnt3.5 + exit ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit ;; + *:[Aa]miga[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-amigaos + exit ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-morphos + exit ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix${UNAME_RELEASE} + exit ;; + arm:riscos:*:*|arm:RISCOS:*:*) + echo arm-unknown-riscos + exit ;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit ;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + if test "`(/bin/universe) 2>/dev/null`" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) echo sparc-icl-nx7; exit ;; + esac ;; + s390x:SunOS:*:*) + echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) + echo i386-pc-auroraux${UNAME_RELEASE} + exit ;; + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) + eval $set_cc_for_build + SUN_ARCH="i386" + # If there is a compiler, see if it is configured for 64-bit objects. + # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. + # This test works for both compilers. + if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then + if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + SUN_ARCH="x86_64" + fi + fi + echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:*:*) + case "`/usr/bin/arch -k`" in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. 
+ echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + exit ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos${UNAME_RELEASE} + exit ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 + case "`/bin/arch`" in + sun3) + echo m68k-sun-sunos${UNAME_RELEASE} + ;; + sun4) + echo sparc-sun-sunos${UNAME_RELEASE} + ;; + esac + exit ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos${UNAME_RELEASE} + exit ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. + atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint${UNAME_RELEASE} + exit ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint${UNAME_RELEASE} + exit ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint${UNAME_RELEASE} + exit ;; + m68k:machten:*:*) + echo m68k-apple-machten${UNAME_RELEASE} + exit ;; + powerpc:machten:*:*) + echo powerpc-apple-machten${UNAME_RELEASE} + exit ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix${UNAME_RELEASE} + exit ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix${UNAME_RELEASE} + exit ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix${UNAME_RELEASE} + exit ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && + dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`$dummy $dummyarg` && + { echo "$SYSTEM_NAME"; exit; } + echo mips-mips-riscos${UNAME_RELEASE} + exit ;; + Motorola:PowerMAX_OS:*:*) + echo powerpc-motorola-powermax + exit ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = 
mc88110 ] + then + if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ + [ ${TARGET_BINARY_INTERFACE}x = x ] + then + echo m88k-dg-dgux${UNAME_RELEASE} + else + echo m88k-dg-dguxbcs${UNAME_RELEASE} + fi + else + echo i586-dg-dgux${UNAME_RELEASE} + fi + exit ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit ;; + XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit ;; + *:IRIX*:*:*) + echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + exit ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. + echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id + exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + i*86:AIX:*:*) + echo i386-ibm-aix + exit ;; + ia64:AIX:*:*) + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + exit ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` + then + echo "$SYSTEM_NAME" + else + echo rs6000-ibm-aix3.2.5 + fi + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + echo rs6000-ibm-aix3.2.4 + else + echo rs6000-ibm-aix3.2 + fi + exit ;; + *:AIX:*:[4567]) + IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` + if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${IBM_ARCH}-ibm-aix${IBM_REV} + exit ;; + *:AIX:*:*) + echo rs6000-ibm-aix + exit ;; + ibmrt:4.4BSD:*|romp-ibm:BSD:*) + echo romp-ibm-bsd4.4 + exit ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + exit ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + echo rs6000-bull-bosx + exit ;; + DPX/2?00:B.O.S.:*:*) + echo m68k-bull-sysv3 + exit ;; + 9000/[34]??:4.3bsd:1.*:*) + echo m68k-hp-bsd + exit ;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + echo m68k-hp-bsd4.4 + exit ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + case "${UNAME_MACHINE}" in + 9000/31? ) HP_ARCH=m68000 ;; + 9000/[34]?? 
) HP_ARCH=m68k ;; + 9000/[678][0-9][0-9]) + if [ -x /usr/bin/getconf ]; then + sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` + sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` + case "${sc_cpu_version}" in + 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 + 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 + 532) # CPU_PA_RISC2_0 + case "${sc_kernel_bits}" in + 32) HP_ARCH="hppa2.0n" ;; + 64) HP_ARCH="hppa2.0w" ;; + '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 + esac ;; + esac + fi + if [ "${HP_ARCH}" = "" ]; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + + #define _HPUX_SOURCE + #include + #include + + int main () + { + #if defined(_SC_KERNEL_BITS) + long bits = sysconf(_SC_KERNEL_BITS); + #endif + long cpu = sysconf (_SC_CPU_VERSION); + + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1"); break; + case CPU_PA_RISC2_0: + #if defined(_SC_KERNEL_BITS) + switch (bits) + { + case 64: puts ("hppa2.0w"); break; + case 32: puts ("hppa2.0n"); break; + default: puts ("hppa2.0"); break; + } break; + #else /* !defined(_SC_KERNEL_BITS) */ + puts ("hppa2.0"); break; + #endif + default: puts ("hppa1.0"); break; + } + exit (0); + } +EOF + (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + test -z "$HP_ARCH" && HP_ARCH=hppa + fi ;; + esac + if [ ${HP_ARCH} = "hppa2.0w" ] + then + eval $set_cc_for_build + + # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating + # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler + # generating 64-bit code. GNU and HP use different nomenclature: + # + # $ CC_FOR_BUILD=cc ./config.guess + # => hppa2.0w-hp-hpux11.23 + # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess + # => hppa64-hp-hpux11.23 + + if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | + grep -q __LP64__ + then + HP_ARCH="hppa2.0w" + else + HP_ARCH="hppa64" + fi + fi + echo ${HP_ARCH}-hp-hpux${HPUX_REV} + exit ;; + ia64:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux${HPUX_REV} + exit ;; + 3050*:HI-UX:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however. 
*/ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + echo unknown-hitachi-hiuxwe2 + exit ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + echo hppa1.1-hp-bsd + exit ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + echo hppa1.1-hp-osf + exit ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit ;; + i*86:OSF1:*:*) + if [ -x /usr/sbin/sysversion ] ; then + echo ${UNAME_MACHINE}-unknown-osf1mk + else + echo ${UNAME_MACHINE}-unknown-osf1 + fi + exit ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit ;; + C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*[A-Z]90:*:*:*) + echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + exit ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:BSD/OS:*:*) + echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:FreeBSD:*:*) + UNAME_PROCESSOR=`/usr/bin/uname -p` + case ${UNAME_PROCESSOR} in + amd64) + echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + *) + echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + esac + exit ;; + i*:CYGWIN*:*) + echo ${UNAME_MACHINE}-pc-cygwin + exit ;; 
+ *:MINGW*:*) + echo ${UNAME_MACHINE}-pc-mingw32 + exit ;; + i*:MSYS*:*) + echo ${UNAME_MACHINE}-pc-msys + exit ;; + i*:windows32*:*) + # uname -m includes "-pc" on this system. + echo ${UNAME_MACHINE}-mingw32 + exit ;; + i*:PW*:*) + echo ${UNAME_MACHINE}-pc-pw32 + exit ;; + *:Interix*:*) + case ${UNAME_MACHINE} in + x86) + echo i586-pc-interix${UNAME_RELEASE} + exit ;; + authenticamd | genuineintel | EM64T) + echo x86_64-unknown-interix${UNAME_RELEASE} + exit ;; + IA64) + echo ia64-unknown-interix${UNAME_RELEASE} + exit ;; + esac ;; + [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) + echo i${UNAME_MACHINE}-pc-mks + exit ;; + 8664:Windows_NT:*) + echo x86_64-pc-mks + exit ;; + i*:Windows_NT*:* | Pentium*:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we + # UNAME_MACHINE based on the output of uname instead of i386? + echo i586-pc-interix + exit ;; + i*:UWIN*:*) + echo ${UNAME_MACHINE}-pc-uwin + exit ;; + amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) + echo x86_64-unknown-cygwin + exit ;; + p*:CYGWIN*:*) + echo powerpcle-unknown-cygwin + exit ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + *:GNU:*:*) + # the GNU system + echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + exit ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu + exit ;; + i*86:Minix:*:*) + echo ${UNAME_MACHINE}-pc-minix + exit ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep -q ld.so.1 + if test "$?" 
= 0 ; then LIBC="libc1" ; else LIBC="" ; fi + echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC} + exit ;; + arm*:Linux:*:*) + eval $set_cc_for_build + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_EABI__ + then + echo ${UNAME_MACHINE}-unknown-linux-gnu + else + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo ${UNAME_MACHINE}-unknown-linux-gnueabi + else + echo ${UNAME_MACHINE}-unknown-linux-gnueabihf + fi + fi + exit ;; + avr32*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + cris:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-gnu + exit ;; + crisv32:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-gnu + exit ;; + frv:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + hexagon:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + i*86:Linux:*:*) + LIBC=gnu + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #ifdef __dietlibc__ + LIBC=dietlibc + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'` + echo "${UNAME_MACHINE}-pc-linux-${LIBC}" + exit ;; + ia64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + m32r*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + m68*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + mips:Linux:*:* | mips64:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef ${UNAME_MACHINE} + #undef ${UNAME_MACHINE}el + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=${UNAME_MACHINE}el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=${UNAME_MACHINE} + #else + CPU= + #endif + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` + test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; } + ;; + or32:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + padre:Linux:*:*) + echo sparc-unknown-linux-gnu + exit ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-gnu + exit ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) echo hppa1.1-unknown-linux-gnu ;; + PA8*) echo hppa2.0-unknown-linux-gnu ;; + *) echo hppa-unknown-linux-gnu ;; + esac + exit ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-gnu + exit ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-gnu + exit ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo ${UNAME_MACHINE}-ibm-linux + exit ;; + sh64*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + sh*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + tile*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + vax:Linux:*:*) + echo ${UNAME_MACHINE}-dec-linux-gnu + exit ;; + x86_64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + xtensa*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + echo i386-sequent-sysv4 + exit ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... 
so that sysv4* matches it. + echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + exit ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. + echo ${UNAME_MACHINE}-pc-os2-emx + exit ;; + i*86:XTS-300:*:STOP) + echo ${UNAME_MACHINE}-unknown-stop + exit ;; + i*86:atheos:*:*) + echo ${UNAME_MACHINE}-unknown-atheos + exit ;; + i*86:syllable:*:*) + echo ${UNAME_MACHINE}-pc-syllable + exit ;; + i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) + echo i386-unknown-lynxos${UNAME_RELEASE} + exit ;; + i*86:*DOS:*:*) + echo ${UNAME_MACHINE}-pc-msdosdjgpp + exit ;; + i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) + UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` + if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then + echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} + else + echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} + fi + exit ;; + i*86:*:5:[678]*) + # UnixWare 7.x, OpenUNIX and OpenServer 6. + case `/bin/uname -X | grep "^Machine"` in + *486*) UNAME_MACHINE=i486 ;; + *Pentium) UNAME_MACHINE=i586 ;; + *Pent*|*Celeron) UNAME_MACHINE=i686 ;; + esac + echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + exit ;; + i*86:*:3.2:*) + if test -f /usr/options/cb.name; then + UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then + UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` + (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 + (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ + && UNAME_MACHINE=i586 + (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ + && UNAME_MACHINE=i686 + (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ + && UNAME_MACHINE=i686 + echo ${UNAME_MACHINE}-pc-sco$UNAME_REL + else + echo ${UNAME_MACHINE}-pc-sysv32 + fi + exit ;; + pc:*:*:*) + # Left here for compatibility: + # uname -m prints for DJGPP always 'pc', but it prints nothing about + # the processor, so we play safe by assuming i586. + # Note: whatever this is, it MUST be the same as what config.sub + # prints for the "djgpp" host, or else GDB configury will decide that + # this is a cross-build. + echo i586-pc-msdosdjgpp + exit ;; + Intel:Mach:3*:*) + echo i386-pc-mach3 + exit ;; + paragon:*:*:*) + echo i860-intel-osf1 + exit ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered. 
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + fi + exit ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4; exit; } ;; + NCR*:*:4.2:* | MPRAS*:*:4.2:*) + OS_REL='.3' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos${UNAME_RELEASE} + exit ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit ;; + TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos${UNAME_RELEASE} + exit ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos${UNAME_RELEASE} + exit ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) + echo powerpc-unknown-lynxos${UNAME_RELEASE} + exit ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv${UNAME_RELEASE} + exit ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + echo ${UNAME_MACHINE}-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + echo i586-unisys-sysv4 + exit ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + echo ${UNAME_MACHINE}-stratus-vos + exit ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux${UNAME_RELEASE} + exit ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if [ -d /usr/nec ]; then + echo mips-nec-sysv${UNAME_RELEASE} + else + echo mips-unknown-sysv${UNAME_RELEASE} + fi + exit ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. + echo powerpc-be-beos + exit ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. + echo i586-pc-beos + exit ;; + BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
+ echo i586-pc-haiku + exit ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux${UNAME_RELEASE} + exit ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux${UNAME_RELEASE} + exit ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux${UNAME_RELEASE} + exit ;; + SX-7:SUPER-UX:*:*) + echo sx7-nec-superux${UNAME_RELEASE} + exit ;; + SX-8:SUPER-UX:*:*) + echo sx8-nec-superux${UNAME_RELEASE} + exit ;; + SX-8R:SUPER-UX:*:*) + echo sx8r-nec-superux${UNAME_RELEASE} + exit ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Rhapsody:*:*) + echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Darwin:*:*) + UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown + case $UNAME_PROCESSOR in + i386) + eval $set_cc_for_build + if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + UNAME_PROCESSOR="x86_64" + fi + fi ;; + unknown) UNAME_PROCESSOR=powerpc ;; + esac + echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + exit ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=`uname -p` + if test "$UNAME_PROCESSOR" = "x86"; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + exit ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit ;; + NEO-?:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk${UNAME_RELEASE} + exit ;; + NSE-?:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk${UNAME_RELEASE} + exit ;; + NSR-?:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk${UNAME_RELEASE} + exit ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit ;; + DS/*:UNIX_System_V:*:*) + echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + exit ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. + if test "$cputype" = "386"; then + UNAME_MACHINE=i386 + else + UNAME_MACHINE="$cputype" + fi + echo ${UNAME_MACHINE}-unknown-plan9 + exit ;; + *:TOPS-10:*:*) + echo pdp10-unknown-tops10 + exit ;; + *:TENEX:*:*) + echo pdp10-unknown-tenex + exit ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + echo pdp10-dec-tops20 + exit ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + echo pdp10-xkl-tops20 + exit ;; + *:TOPS-20:*:*) + echo pdp10-unknown-tops20 + exit ;; + *:ITS:*:*) + echo pdp10-unknown-its + exit ;; + SEI:*:*:SEIUX) + echo mips-sei-seiux${UNAME_RELEASE} + exit ;; + *:DragonFly:*:*) + echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case "${UNAME_MACHINE}" in + A*) echo alpha-dec-vms ; exit ;; + I*) echo ia64-dec-vms ; exit ;; + V*) echo vax-dec-vms ; exit ;; + esac ;; + *:XENIX:*:SysV) + echo i386-pc-xenix + exit ;; + i*86:skyos:*:*) + echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' + exit ;; + i*86:rdos:*:*) + echo ${UNAME_MACHINE}-pc-rdos + exit ;; + i*86:AROS:*:*) + echo ${UNAME_MACHINE}-pc-aros + exit ;; +esac + +#echo '(No uname command or uname output not recognized.)' 1>&2 +#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2 + +eval $set_cc_for_build +cat >$dummy.c < +# include +#endif +main () +{ +#if defined (sony) +#if defined (MIPSEB) + /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, + I don't know.... 
*/ + printf ("mips-sony-bsd\n"); exit (0); +#else +#include + printf ("m68k-sony-newsos%s\n", +#ifdef NEWSOS4 + "4" +#else + "" +#endif + ); exit (0); +#endif +#endif + +#if defined (__arm) && defined (__acorn) && defined (__unix) + printf ("arm-acorn-riscix\n"); exit (0); +#endif + +#if defined (hp300) && !defined (hpux) + printf ("m68k-hp-bsd\n"); exit (0); +#endif + +#if defined (NeXT) +#if !defined (__ARCHITECTURE__) +#define __ARCHITECTURE__ "m68k" +#endif + int version; + version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; + if (version < 4) + printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); + else + printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); + exit (0); +#endif + +#if defined (MULTIMAX) || defined (n16) +#if defined (UMAXV) + printf ("ns32k-encore-sysv\n"); exit (0); +#else +#if defined (CMU) + printf ("ns32k-encore-mach\n"); exit (0); +#else + printf ("ns32k-encore-bsd\n"); exit (0); +#endif +#endif +#endif + +#if defined (__386BSD__) + printf ("i386-pc-bsd\n"); exit (0); +#endif + +#if defined (sequent) +#if defined (i386) + printf ("i386-sequent-dynix\n"); exit (0); +#endif +#if defined (ns32000) + printf ("ns32k-sequent-dynix\n"); exit (0); +#endif +#endif + +#if defined (_SEQUENT_) + struct utsname un; + + uname(&un); + + if (strncmp(un.version, "V2", 2) == 0) { + printf ("i386-sequent-ptx2\n"); exit (0); + } + if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */ + printf ("i386-sequent-ptx1\n"); exit (0); + } + printf ("i386-sequent-ptx\n"); exit (0); + +#endif + +#if defined (vax) +# if !defined (ultrix) +# include +# if defined (BSD) +# if BSD == 43 + printf ("vax-dec-bsd4.3\n"); exit (0); +# else +# if BSD == 199006 + printf ("vax-dec-bsd4.3reno\n"); exit (0); +# else + printf ("vax-dec-bsd\n"); exit (0); +# endif +# endif +# else + printf ("vax-dec-bsd\n"); exit (0); +# endif +# else + printf ("vax-dec-ultrix\n"); exit (0); +# endif +#endif + +#if defined (alliant) && defined (i860) + printf ("i860-alliant-bsd\n"); exit (0); +#endif + + exit (1); +} +EOF + +$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + +# Apollos put the system type in the environment. + +test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; } + +# Convex versions that predate uname can use getsysinfo(1) + +if [ -x /usr/convex/getsysinfo ] +then + case `getsysinfo -f cpu_type` in + c1*) + echo c1-convex-bsd + exit ;; + c2*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + c34*) + echo c34-convex-bsd + exit ;; + c38*) + echo c38-convex-bsd + exit ;; + c4*) + echo c4-convex-bsd + exit ;; + esac +fi + +cat >&2 < in order to provide the needed +information to handle your system. 
+ +config.guess timestamp = $timestamp + +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = ${UNAME_MACHINE} +UNAME_RELEASE = ${UNAME_RELEASE} +UNAME_SYSTEM = ${UNAME_SYSTEM} +UNAME_VERSION = ${UNAME_VERSION} +EOF + +exit 1 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/Zoltan-3.90/config/config.sub b/Zoltan-3.90/config/config.sub new file mode 100755 index 00000000..d6b6b3c7 --- /dev/null +++ b/Zoltan-3.90/config/config.sub @@ -0,0 +1,1766 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, +# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +# 2011, 2012 Free Software Foundation, Inc. + +timestamp='2012-01-01' + +# This file is (in principle) common to ALL GNU software. +# The presence of a machine in this file suggests that SOME GNU software +# can handle that machine. It does not imply ALL GNU software can. +# +# This file is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA +# 02110-1301, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + + +# Please send patches to . Submit a context +# diff and a properly formatted GNU ChangeLog entry. +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. 
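
A brief sketch of how the two scripts above cooperate during configuration; the triplets shown are examples consistent with the case tables in the scripts, and the relative paths are assumptions.

    # config.guess prints the canonical name of the machine it runs on:
    build=`./config/config.guess`            # e.g. x86_64-unknown-linux-gnu
    # config.sub canonicalises a user-supplied alias for --build/--host:
    host=`./config/config.sub amd64-linux`   # prints x86_64-pc-linux-gnu
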
The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. + +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS + $0 [OPTION] ALIAS + +Canonicalize a configuration name. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.sub ($timestamp) + +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, +2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 +Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" + exit 1 ;; + + *local*) + # First pass through any local machine types. + echo $1 + exit ;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). +# Here we must recognize all the valid KERNEL-OS combinations. +maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` +case $maybe_os in + nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ + linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ + knetbsd*-gnu* | netbsd*-gnu* | \ + kopensolaris*-gnu* | \ + storm-chaos* | os2-emx* | rtmk-nova*) + os=-$maybe_os + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` + ;; + *) + basic_machine=`echo $1 | sed 's/-[^-]*$//'` + if [ $basic_machine != $1 ] + then os=`echo $1 | sed 's/.*-/-/'` + else os=; fi + ;; +esac + +### Let's recognize common machines as not being operating systems so +### that things like config.sub decstation-3100 work. We also +### recognize some manufacturers as not being operating systems, so we +### can provide default operating systems below. +case $os in + -sun*os*) + # Prevent following clause from handling this invalid input. 
+ ;; + -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ + -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ + -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ + -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ + -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ + -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ + -apple | -axis | -knuth | -cray | -microblaze) + os= + basic_machine=$1 + ;; + -bluegene*) + os=-cnk + ;; + -sim | -cisco | -oki | -wec | -winbond) + os= + basic_machine=$1 + ;; + -scout) + ;; + -wrs) + os=-vxworks + basic_machine=$1 + ;; + -chorusos*) + os=-chorusos + basic_machine=$1 + ;; + -chorusrdb) + os=-chorusrdb + basic_machine=$1 + ;; + -hiux*) + os=-hiuxwe2 + ;; + -sco6) + os=-sco5v6 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5) + os=-sco3.2v5 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco4) + os=-sco3.2v4 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2.[4-9]*) + os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2v[4-9]*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5v6*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco*) + os=-sco3.2v2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -udk*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -isc) + os=-isc2.2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -clix*) + basic_machine=clipper-intergraph + ;; + -isc*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -lynx*) + os=-lynxos + ;; + -ptx*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + ;; + -windowsnt*) + os=`echo $os | sed -e 's/windowsnt/winnt/'` + ;; + -psos*) + os=-psos + ;; + -mint | -mint[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; +esac + +# Decode aliases for certain CPU-COMPANY combinations. +case $basic_machine in + # Recognize the basic CPU types without company name. + # Some are omitted here because they have special meanings below. 
+ 1750a | 580 \ + | a29k \ + | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ + | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ + | am33_2.0 \ + | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \ + | be32 | be64 \ + | bfin \ + | c4x | clipper \ + | d10v | d30v | dlx | dsp16xx \ + | epiphany \ + | fido | fr30 | frv \ + | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ + | hexagon \ + | i370 | i860 | i960 | ia64 \ + | ip2k | iq2000 \ + | le32 | le64 \ + | lm32 \ + | m32c | m32r | m32rle | m68000 | m68k | m88k \ + | maxq | mb | microblaze | mcore | mep | metag \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64el \ + | mips64octeon | mips64octeonel \ + | mips64orion | mips64orionel \ + | mips64r5900 | mips64r5900el \ + | mips64vr | mips64vrel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | mips64vr5000el \ + | mips64vr5900 | mips64vr5900el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipstx39 | mipstx39el \ + | mn10200 | mn10300 \ + | moxie \ + | mt \ + | msp430 \ + | nds32 | nds32le | nds32be \ + | nios | nios2 \ + | ns16k | ns32k \ + | open8 \ + | or32 \ + | pdp10 | pdp11 | pj | pjl \ + | powerpc | powerpc64 | powerpc64le | powerpcle \ + | pyramid \ + | rl78 | rx \ + | score \ + | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ + | sh64 | sh64le \ + | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ + | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ + | spu \ + | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ + | ubicom32 \ + | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ + | we32k \ + | x86 | xc16x | xstormy16 | xtensa \ + | z8k | z80) + basic_machine=$basic_machine-unknown + ;; + c54x) + basic_machine=tic54x-unknown + ;; + c55x) + basic_machine=tic55x-unknown + ;; + c6x) + basic_machine=tic6x-unknown + ;; + m6811 | m68hc11 | m6812 | m68hc12 | picochip) + basic_machine=$basic_machine-unknown + os=-none + ;; + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + ;; + ms1) + basic_machine=mt-unknown + ;; + + strongarm | thumb | xscale) + basic_machine=arm-unknown + ;; + + xscaleeb) + basic_machine=armeb-unknown + ;; + + xscaleel) + basic_machine=armel-unknown + ;; + + # We use `pc' rather than `unknown' + # because (1) that's what they normally are, and + # (2) the word "unknown" tends to confuse beginning users. + i*86 | x86_64) + basic_machine=$basic_machine-pc + ;; + # Object if more than one company name word. + *-*-*) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; + # Recognize the basic CPU types with company name. 
+ 580-* \ + | a29k-* \ + | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ + | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \ + | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ + | avr-* | avr32-* \ + | be32-* | be64-* \ + | bfin-* | bs2000-* \ + | c[123]* | c30-* | [cjt]90-* | c4x-* \ + | clipper-* | craynv-* | cydra-* \ + | d10v-* | d30v-* | dlx-* \ + | elxsi-* \ + | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ + | h8300-* | h8500-* \ + | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ + | hexagon-* \ + | i*86-* | i860-* | i960-* | ia64-* \ + | ip2k-* | iq2000-* \ + | le32-* | le64-* \ + | lm32-* \ + | m32c-* | m32r-* | m32rle-* \ + | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ + | m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \ + | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ + | mips16-* \ + | mips64-* | mips64el-* \ + | mips64octeon-* | mips64octeonel-* \ + | mips64orion-* | mips64orionel-* \ + | mips64r5900-* | mips64r5900el-* \ + | mips64vr-* | mips64vrel-* \ + | mips64vr4100-* | mips64vr4100el-* \ + | mips64vr4300-* | mips64vr4300el-* \ + | mips64vr5000-* | mips64vr5000el-* \ + | mips64vr5900-* | mips64vr5900el-* \ + | mipsisa32-* | mipsisa32el-* \ + | mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa64-* | mipsisa64el-* \ + | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64sb1-* | mipsisa64sb1el-* \ + | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipstx39-* | mipstx39el-* \ + | mmix-* \ + | mt-* \ + | msp430-* \ + | nds32-* | nds32le-* | nds32be-* \ + | nios-* | nios2-* \ + | none-* | np1-* | ns16k-* | ns32k-* \ + | open8-* \ + | orion-* \ + | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ + | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ + | pyramid-* \ + | rl78-* | romp-* | rs6000-* | rx-* \ + | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ + | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ + | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ + | sparclite-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \ + | tahoe-* \ + | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ + | tile*-* \ + | tron-* \ + | ubicom32-* \ + | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ + | vax-* \ + | we32k-* \ + | x86-* | x86_64-* | xc16x-* | xps100-* \ + | xstormy16-* | xtensa*-* \ + | ymp-* \ + | z8k-* | z80-*) + ;; + # Recognize the basic CPU types without company name, with glob match. + xtensa*) + basic_machine=$basic_machine-unknown + ;; + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. 
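+	# For example (illustrative): `amd64' is rewritten below to x86_64-pc,
+	# `amiga' to m68k-unknown, and `decstation' to mips-dec; some aliases
+	# such as `go32' also fix the OS (i386-pc with os=-go32).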
+ 386bsd) + basic_machine=i386-unknown + os=-bsd + ;; + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + basic_machine=m68000-att + ;; + 3b*) + basic_machine=we32k-att + ;; + a29khif) + basic_machine=a29k-amd + os=-udi + ;; + abacus) + basic_machine=abacus-unknown + ;; + adobe68k) + basic_machine=m68010-adobe + os=-scout + ;; + alliant | fx80) + basic_machine=fx80-alliant + ;; + altos | altos3068) + basic_machine=m68k-altos + ;; + am29k) + basic_machine=a29k-none + os=-bsd + ;; + amd64) + basic_machine=x86_64-pc + ;; + amd64-*) + basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + amdahl) + basic_machine=580-amdahl + os=-sysv + ;; + amiga | amiga-*) + basic_machine=m68k-unknown + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=-amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=-sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=-sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=-bsd + ;; + aros) + basic_machine=i386-pc + os=-aros + ;; + aux) + basic_machine=m68k-apple + os=-aux + ;; + balance) + basic_machine=ns32k-sequent + os=-dynix + ;; + blackfin) + basic_machine=bfin-unknown + os=-linux + ;; + blackfin-*) + basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + bluegene*) + basic_machine=powerpc-ibm + os=-cnk + ;; + c54x-*) + basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c55x-*) + basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c6x-*) + basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c90) + basic_machine=c90-cray + os=-unicos + ;; + cegcc) + basic_machine=arm-unknown + os=-cegcc + ;; + convex-c1) + basic_machine=c1-convex + os=-bsd + ;; + convex-c2) + basic_machine=c2-convex + os=-bsd + ;; + convex-c32) + basic_machine=c32-convex + os=-bsd + ;; + convex-c34) + basic_machine=c34-convex + os=-bsd + ;; + convex-c38) + basic_machine=c38-convex + os=-bsd + ;; + cray | j90) + basic_machine=j90-cray + os=-unicos + ;; + craynv) + basic_machine=craynv-cray + os=-unicosmp + ;; + cr16 | cr16-*) + basic_machine=cr16-unknown + os=-elf + ;; + crds | unos) + basic_machine=m68k-crds + ;; + crisv32 | crisv32-* | etraxfs*) + basic_machine=crisv32-axis + ;; + cris | cris-* | etrax*) + basic_machine=cris-axis + ;; + crx) + basic_machine=crx-unknown + os=-elf + ;; + da30 | da30-*) + basic_machine=m68k-da30 + ;; + decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) + basic_machine=mips-dec + ;; + decsystem10* | dec10*) + basic_machine=pdp10-dec + os=-tops10 + ;; + decsystem20* | dec20*) + basic_machine=pdp10-dec + os=-tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + basic_machine=m68k-motorola + ;; + delta88) + basic_machine=m88k-motorola + os=-sysv3 + ;; + dicos) + basic_machine=i686-pc + os=-dicos + ;; + djgpp) + basic_machine=i586-pc + os=-msdosdjgpp + ;; + dpx20 | dpx20-*) + basic_machine=rs6000-bull + os=-bosx + ;; + dpx2* | dpx2*-bull) + basic_machine=m68k-bull + os=-sysv3 + ;; + ebmon29k) + basic_machine=a29k-amd + os=-ebmon + ;; + elxsi) + basic_machine=elxsi-elxsi + os=-bsd + ;; + encore | umax | mmax) + basic_machine=ns32k-encore + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=-ose + ;; + fx2800) + basic_machine=i860-alliant + ;; + genix) + basic_machine=ns32k-ns + ;; + gmicro) + basic_machine=tron-gmicro + os=-sysv + ;; + go32) + basic_machine=i386-pc + os=-go32 + ;; + h3050r* | hiux*) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 
+ ;; + h8300hms) + basic_machine=h8300-hitachi + os=-hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=-xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=-hms + ;; + harris) + basic_machine=m88k-harris + os=-sysv3 + ;; + hp300-*) + basic_machine=m68k-hp + ;; + hp300bsd) + basic_machine=m68k-hp + os=-bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=-hpux + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + basic_machine=m68000-hp + ;; + hp9k3[2-9][0-9]) + basic_machine=m68k-hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + basic_machine=hppa1.1-hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hppa-next) + os=-nextstep3 + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=-osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=-proelf + ;; + i370-ibm* | ibm*) + basic_machine=i370-ibm + ;; + i*86v32) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv32 + ;; + i*86v4*) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv4 + ;; + i*86v) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv + ;; + i*86sol2) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-solaris2 + ;; + i386mach) + basic_machine=i386-mach + os=-mach + ;; + i386-vsta | vsta) + basic_machine=i386-unknown + os=-vsta + ;; + iris | iris4d) + basic_machine=mips-sgi + case $os in + -irix*) + ;; + *) + os=-irix4 + ;; + esac + ;; + isi68 | isi) + basic_machine=m68k-isi + os=-sysv + ;; + m68knommu) + basic_machine=m68k-unknown + os=-linux + ;; + m68knommu-*) + basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + m88k-omron*) + basic_machine=m88k-omron + ;; + magnum | m3230) + basic_machine=mips-mips + os=-sysv + ;; + merlin) + basic_machine=ns32k-utek + os=-sysv + ;; + microblaze) + basic_machine=microblaze-xilinx + ;; + mingw32) + basic_machine=i386-pc + os=-mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + os=-mingw32ce + ;; + miniframe) + basic_machine=m68000-convergent + ;; + *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; + mips3*-*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + ;; + mips3*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown + ;; + monitor) + basic_machine=m68k-rom68k + os=-coff + ;; + morphos) + basic_machine=powerpc-unknown + os=-morphos + ;; + msdos) + basic_machine=i386-pc + os=-msdos + ;; + ms1-*) + basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` + ;; + msys) + basic_machine=i386-pc + os=-msys + ;; + mvs) + basic_machine=i370-ibm + os=-mvs + ;; + nacl) + basic_machine=le32-unknown + os=-nacl + ;; + ncr3000) + basic_machine=i486-ncr + os=-sysv4 + ;; + netbsd386) + basic_machine=i386-unknown + os=-netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + os=-linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=-newsos + ;; + news1000) + basic_machine=m68030-sony + os=-newsos + ;; + news-3600 | risc-news) + basic_machine=mips-sony + os=-newsos + ;; + necv70) + basic_machine=v70-nec + os=-sysv + ;; + next | m*-next ) + basic_machine=m68k-next + case $os in + -nextstep* ) + ;; + 
-ns2*) + os=-nextstep2 + ;; + *) + os=-nextstep3 + ;; + esac + ;; + nh3000) + basic_machine=m68k-harris + os=-cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=-cxux + ;; + nindy960) + basic_machine=i960-intel + os=-nindy + ;; + mon960) + basic_machine=i960-intel + os=-mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=-nonstopux + ;; + np1) + basic_machine=np1-gould + ;; + neo-tandem) + basic_machine=neo-tandem + ;; + nse-tandem) + basic_machine=nse-tandem + ;; + nsr-tandem) + basic_machine=nsr-tandem + ;; + op50n-* | op60c-*) + basic_machine=hppa1.1-oki + os=-proelf + ;; + openrisc | openrisc-*) + basic_machine=or32-unknown + ;; + os400) + basic_machine=powerpc-ibm + os=-os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=-ose + ;; + os68k) + basic_machine=m68k-none + os=-os68k + ;; + pa-hitachi) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + paragon) + basic_machine=i860-intel + os=-osf + ;; + parisc) + basic_machine=hppa-unknown + os=-linux + ;; + parisc-*) + basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + pbd) + basic_machine=sparc-tti + ;; + pbb) + basic_machine=m68k-tti + ;; + pc532 | pc532-*) + basic_machine=ns32k-pc532 + ;; + pc98) + basic_machine=i386-pc + ;; + pc98-*) + basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium | p5 | k5 | k6 | nexgen | viac3) + basic_machine=i586-pc + ;; + pentiumpro | p6 | 6x86 | athlon | athlon_*) + basic_machine=i686-pc + ;; + pentiumii | pentium2 | pentiumiii | pentium3) + basic_machine=i686-pc + ;; + pentium4) + basic_machine=i786-pc + ;; + pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) + basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumpro-* | p6-* | 6x86-* | athlon-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium4-*) + basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pn) + basic_machine=pn-gould + ;; + power) basic_machine=power-ibm + ;; + ppc | ppcbe) basic_machine=powerpc-unknown + ;; + ppc-* | ppcbe-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppcle | powerpclittle | ppc-le | powerpc-little) + basic_machine=powerpcle-unknown + ;; + ppcle-* | powerpclittle-*) + basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64) basic_machine=powerpc64-unknown + ;; + ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64le | powerpc64little | ppc64-le | powerpc64-little) + basic_machine=powerpc64le-unknown + ;; + ppc64le-* | powerpc64little-*) + basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ps2) + basic_machine=i386-ibm + ;; + pw32) + basic_machine=i586-unknown + os=-pw32 + ;; + rdos) + basic_machine=i386-pc + os=-rdos + ;; + rom68k) + basic_machine=m68k-rom68k + os=-coff + ;; + rm[46]00) + basic_machine=mips-siemens + ;; + rtpc | rtpc-*) + basic_machine=romp-ibm + ;; + s390 | s390-*) + basic_machine=s390-ibm + ;; + s390x | s390x-*) + basic_machine=s390x-ibm + ;; + sa29200) + basic_machine=a29k-amd + os=-udi + ;; + sb1) + basic_machine=mipsisa64sb1-unknown + ;; + sb1el) + basic_machine=mipsisa64sb1el-unknown + ;; + sde) + basic_machine=mipsisa32-sde + os=-elf + ;; + sei) + basic_machine=mips-sei + os=-seiux + ;; + sequent) + basic_machine=i386-sequent + ;; + sh) + basic_machine=sh-hitachi + os=-hms + ;; + sh5el) + basic_machine=sh5le-unknown + 
;; + sh64) + basic_machine=sh64-unknown + ;; + sparclite-wrs | simso-wrs) + basic_machine=sparclite-wrs + os=-vxworks + ;; + sps7) + basic_machine=m68k-bull + os=-sysv2 + ;; + spur) + basic_machine=spur-unknown + ;; + st2000) + basic_machine=m68k-tandem + ;; + stratus) + basic_machine=i860-stratus + os=-sysv4 + ;; + strongarm-* | thumb-*) + basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + sun2) + basic_machine=m68000-sun + ;; + sun2os3) + basic_machine=m68000-sun + os=-sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=-sunos4 + ;; + sun3os3) + basic_machine=m68k-sun + os=-sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=-sunos4 + ;; + sun4os3) + basic_machine=sparc-sun + os=-sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=-sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=-solaris2 + ;; + sun3 | sun3-*) + basic_machine=m68k-sun + ;; + sun4) + basic_machine=sparc-sun + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + ;; + sv1) + basic_machine=sv1-cray + os=-unicos + ;; + symmetry) + basic_machine=i386-sequent + os=-dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=-unicos + ;; + t90) + basic_machine=t90-cray + os=-unicos + ;; + tile*) + basic_machine=$basic_machine-unknown + os=-linux-gnu + ;; + tx39) + basic_machine=mipstx39-unknown + ;; + tx39el) + basic_machine=mipstx39el-unknown + ;; + toad1) + basic_machine=pdp10-xkl + os=-tops20 + ;; + tower | tower-32) + basic_machine=m68k-ncr + ;; + tpf) + basic_machine=s390x-ibm + os=-tpf + ;; + udi29k) + basic_machine=a29k-amd + os=-udi + ;; + ultra3) + basic_machine=a29k-nyu + os=-sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=-none + ;; + vaxv) + basic_machine=vax-dec + os=-sysv + ;; + vms) + basic_machine=vax-dec + os=-vms + ;; + vpp*|vx|vx-*) + basic_machine=f301-fujitsu + ;; + vxworks960) + basic_machine=i960-wrs + os=-vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=-vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=-vxworks + ;; + w65*) + basic_machine=w65-wdc + os=-none + ;; + w89k-*) + basic_machine=hppa1.1-winbond + os=-proelf + ;; + xbox) + basic_machine=i686-pc + os=-mingw32 + ;; + xps | xps100) + basic_machine=xps100-honeywell + ;; + xscale-* | xscalee[bl]-*) + basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` + ;; + ymp) + basic_machine=ymp-cray + os=-unicos + ;; + z8k-*-coff) + basic_machine=z8k-unknown + os=-sim + ;; + z80-*-coff) + basic_machine=z80-unknown + os=-sim + ;; + none) + basic_machine=none-none + os=-none + ;; + +# Here we handle the default manufacturer of certain CPU types. It is in +# some cases the only manufacturer, in others, it is the most popular. 
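+# For example (illustrative, derived from the entries below): a bare `mmix'
+# gets vendor knuth and `vax' gets dec, while `pdp10' stays unknown because
+# of the many clones, e.g.
+#   sh ./config.sub mmix    # roughly mmix-knuth-mmixware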
+ w89k) + basic_machine=hppa1.1-winbond + ;; + op50n) + basic_machine=hppa1.1-oki + ;; + op60c) + basic_machine=hppa1.1-oki + ;; + romp) + basic_machine=romp-ibm + ;; + mmix) + basic_machine=mmix-knuth + ;; + rs6000) + basic_machine=rs6000-ibm + ;; + vax) + basic_machine=vax-dec + ;; + pdp10) + # there are many clones, so DEC is not a safe bet + basic_machine=pdp10-unknown + ;; + pdp11) + basic_machine=pdp11-dec + ;; + we32k) + basic_machine=we32k-att + ;; + sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) + basic_machine=sh-unknown + ;; + sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) + basic_machine=sparc-sun + ;; + cydra) + basic_machine=cydra-cydrome + ;; + orion) + basic_machine=orion-highlevel + ;; + orion105) + basic_machine=clipper-highlevel + ;; + mac | mpw | mac-mpw) + basic_machine=m68k-apple + ;; + pmac | pmac-mpw) + basic_machine=powerpc-apple + ;; + *-unknown) + # Make sure to match an already-canonicalized machine name. + ;; + *) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $basic_machine in + *-digital*) + basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + ;; + *-commodore*) + basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if [ x"$os" != x"" ] +then +case $os in + # First match some system type aliases + # that might get confused with valid system types. + # -solaris* is a basic system type, with this one exception. + -auroraux) + os=-auroraux + ;; + -solaris1 | -solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` + ;; + -solaris) + os=-solaris2 + ;; + -svr4*) + os=-sysv4 + ;; + -unixware*) + os=-sysv4.2uw + ;; + -gnu/linux*) + os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` + ;; + # First accept the basic system types. + # The portable systems comes first. + # Each alternative MUST END IN A *, to match a version number. + # -sysv* is not here because it comes later, after sysvr4. 
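+	# For example (illustrative): `-solaris' was already turned into
+	# -solaris2 above, `-gnu/linux*' into -linux-gnu, and plain `-linux*'
+	# is normalized to -linux-gnu further below.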
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ + | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ + | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ + | -sym* | -kopensolaris* \ + | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ + | -aos* | -aros* \ + | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ + | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ + | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ + | -openbsd* | -solidbsd* \ + | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ + | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ + | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ + | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ + | -chorusos* | -chorusrdb* | -cegcc* \ + | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ + | -mingw32* | -linux-gnu* | -linux-android* \ + | -linux-newlib* | -linux-uclibc* \ + | -uxpv* | -beos* | -mpeix* | -udk* \ + | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ + | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ + | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ + | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ + | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ + | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ + | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*) + # Remember, each alternative MUST END IN *, to match a version number. + ;; + -qnx*) + case $basic_machine in + x86-* | i*86-*) + ;; + *) + os=-nto$os + ;; + esac + ;; + -nto-qnx*) + ;; + -nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` + ;; + -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ + | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ + | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + ;; + -mac*) + os=`echo $os | sed -e 's|mac|macos|'` + ;; + -linux-dietlibc) + os=-linux-dietlibc + ;; + -linux*) + os=`echo $os | sed -e 's|linux|linux-gnu|'` + ;; + -sunos5*) + os=`echo $os | sed -e 's|sunos5|solaris2|'` + ;; + -sunos6*) + os=`echo $os | sed -e 's|sunos6|solaris3|'` + ;; + -opened*) + os=-openedition + ;; + -os400*) + os=-os400 + ;; + -wince*) + os=-wince + ;; + -osfrose*) + os=-osfrose + ;; + -osf*) + os=-osf + ;; + -utek*) + os=-bsd + ;; + -dynix*) + os=-bsd + ;; + -acis*) + os=-aos + ;; + -atheos*) + os=-atheos + ;; + -syllable*) + os=-syllable + ;; + -386bsd) + os=-bsd + ;; + -ctix* | -uts*) + os=-sysv + ;; + -nova*) + os=-rtmk-nova + ;; + -ns2 ) + os=-nextstep2 + ;; + -nsk*) + os=-nsk + ;; + # Preserve the version number of sinix5. + -sinix5.*) + os=`echo $os | sed -e 's|sinix|sysv|'` + ;; + -sinix*) + os=-sysv4 + ;; + -tpf*) + os=-tpf + ;; + -triton*) + os=-sysv3 + ;; + -oss*) + os=-sysv3 + ;; + -svr4) + os=-sysv4 + ;; + -svr3) + os=-sysv3 + ;; + -sysvr4) + os=-sysv4 + ;; + # This must come after -sysvr4. + -sysv*) + ;; + -ose*) + os=-ose + ;; + -es1800*) + os=-ose + ;; + -xenix) + os=-xenix + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + os=-mint + ;; + -aros*) + os=-aros + ;; + -kaos*) + os=-kaos + ;; + -zvmoe) + os=-zvmoe + ;; + -dicos*) + os=-dicos + ;; + -nacl*) + ;; + -none) + ;; + *) + # Get rid of the `-' at the beginning of $os. + os=`echo $os | sed 's/[^-]*-//'` + echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + exit 1 + ;; +esac +else + +# Here we handle the default operating systems that come with various machines. 
+# The value should be what the vendor currently ships out the door with their +# machine or put another way, the most popular os provided with the machine. + +# Note that if you're going to try to match "-MANUFACTURER" here (say, +# "-sun"), then you have to tell the case statement up towards the top +# that MANUFACTURER isn't an operating system. Otherwise, code above +# will signal an error saying that MANUFACTURER isn't an operating +# system, and we'll never get to this point. + +case $basic_machine in + score-*) + os=-elf + ;; + spu-*) + os=-elf + ;; + *-acorn) + os=-riscix1.2 + ;; + arm*-rebel) + os=-linux + ;; + arm*-semi) + os=-aout + ;; + c4x-* | tic4x-*) + os=-coff + ;; + tic54x-*) + os=-coff + ;; + tic55x-*) + os=-coff + ;; + tic6x-*) + os=-coff + ;; + # This must come before the *-dec entry. + pdp10-*) + os=-tops20 + ;; + pdp11-*) + os=-none + ;; + *-dec | vax-*) + os=-ultrix4.2 + ;; + m68*-apollo) + os=-domain + ;; + i386-sun) + os=-sunos4.0.2 + ;; + m68000-sun) + os=-sunos3 + ;; + m68*-cisco) + os=-aout + ;; + mep-*) + os=-elf + ;; + mips*-cisco) + os=-elf + ;; + mips*-*) + os=-elf + ;; + or32-*) + os=-coff + ;; + *-tti) # must be before sparc entry or we get the wrong os. + os=-sysv3 + ;; + sparc-* | *-sun) + os=-sunos4.1.1 + ;; + *-be) + os=-beos + ;; + *-haiku) + os=-haiku + ;; + *-ibm) + os=-aix + ;; + *-knuth) + os=-mmixware + ;; + *-wec) + os=-proelf + ;; + *-winbond) + os=-proelf + ;; + *-oki) + os=-proelf + ;; + *-hp) + os=-hpux + ;; + *-hitachi) + os=-hiux + ;; + i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) + os=-sysv + ;; + *-cbm) + os=-amigaos + ;; + *-dg) + os=-dgux + ;; + *-dolphin) + os=-sysv3 + ;; + m68k-ccur) + os=-rtu + ;; + m88k-omron*) + os=-luna + ;; + *-next ) + os=-nextstep + ;; + *-sequent) + os=-ptx + ;; + *-crds) + os=-unos + ;; + *-ns) + os=-genix + ;; + i370-*) + os=-mvs + ;; + *-next) + os=-nextstep3 + ;; + *-gould) + os=-sysv + ;; + *-highlevel) + os=-bsd + ;; + *-encore) + os=-bsd + ;; + *-sgi) + os=-irix + ;; + *-siemens) + os=-sysv4 + ;; + *-masscomp) + os=-rtu + ;; + f30[01]-fujitsu | f700-fujitsu) + os=-uxpv + ;; + *-rom68k) + os=-coff + ;; + *-*bug) + os=-coff + ;; + *-apple) + os=-macos + ;; + *-atari*) + os=-mint + ;; + *) + os=-none + ;; +esac +fi + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. 
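+# For example (illustrative sketch): an os of -sunos* maps the unknown
+# vendor to sun, -hpux* to hp, and -beos* to be, so something like
+#   sh ./config.sub sparc-sunos4.1.1
+# should come out with `sun' as the vendor (roughly sparc-sun-sunos4.1.1).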
+vendor=unknown +case $basic_machine in + *-unknown) + case $os in + -riscix*) + vendor=acorn + ;; + -sunos*) + vendor=sun + ;; + -cnk*|-aix*) + vendor=ibm + ;; + -beos*) + vendor=be + ;; + -hpux*) + vendor=hp + ;; + -mpeix*) + vendor=hp + ;; + -hiux*) + vendor=hitachi + ;; + -unos*) + vendor=crds + ;; + -dgux*) + vendor=dg + ;; + -luna*) + vendor=omron + ;; + -genix*) + vendor=ns + ;; + -mvs* | -opened*) + vendor=ibm + ;; + -os400*) + vendor=ibm + ;; + -ptx*) + vendor=sequent + ;; + -tpf*) + vendor=ibm + ;; + -vxsim* | -vxworks* | -windiss*) + vendor=wrs + ;; + -aux*) + vendor=apple + ;; + -hms*) + vendor=hitachi + ;; + -mpw* | -macos*) + vendor=apple + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + vendor=atari + ;; + -vos*) + vendor=stratus + ;; + esac + basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + ;; +esac + +echo $basic_machine$os +exit + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/Zoltan-3.90/config/depcomp b/Zoltan-3.90/config/depcomp new file mode 100755 index 00000000..bd0ac089 --- /dev/null +++ b/Zoltan-3.90/config/depcomp @@ -0,0 +1,688 @@ +#! /bin/sh +# depcomp - compile a program generating dependencies as side-effects + +scriptversion=2011-12-04.11; # UTC + +# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2006, 2007, 2009, 2010, +# 2011 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# Originally written by Alexandre Oliva . + +case $1 in + '') + echo "$0: No command. Try \`$0 --help' for more information." 1>&2 + exit 1; + ;; + -h | --h*) + cat <<\EOF +Usage: depcomp [--help] [--version] PROGRAM [ARGS] + +Run PROGRAMS ARGS to compile a file, generating dependencies +as side-effects. + +Environment variables: + depmode Dependency tracking mode. + source Source file read by `PROGRAMS ARGS'. + object Object file output by `PROGRAMS ARGS'. + DEPDIR directory where to store dependencies. + depfile Dependency file to output. + tmpdepfile Temporary file to use when outputting dependencies. + libtool Whether libtool is used (yes/no). + +Report bugs to . +EOF + exit $? + ;; + -v | --v*) + echo "depcomp $scriptversion" + exit $? + ;; +esac + +if test -z "$depmode" || test -z "$source" || test -z "$object"; then + echo "depcomp: Variables source, object and depmode must be set" 1>&2 + exit 1 +fi + +# Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po. 
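+# Illustrative example (a sketch, not upstream text; file names are made up):
+# automake-generated rules drive this script entirely through the environment
+# variables documented above, roughly
+#   depmode=gcc3 source=foo.c object=foo.o depfile=.deps/foo.Po \
+#     ./depcomp gcc -c -o foo.o foo.c
+# which runs the compile and leaves foo.o's header dependencies in
+# .deps/foo.Po for the Makefile to include.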
+depfile=${depfile-`echo "$object" | + sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`} +tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`} + +rm -f "$tmpdepfile" + +# Some modes work just like other modes, but use different flags. We +# parameterize here, but still list the modes in the big case below, +# to make depend.m4 easier to write. Note that we *cannot* use a case +# here, because this file can only contain one case statement. +if test "$depmode" = hp; then + # HP compiler uses -M and no extra arg. + gccflag=-M + depmode=gcc +fi + +if test "$depmode" = dashXmstdout; then + # This is just like dashmstdout with a different argument. + dashmflag=-xM + depmode=dashmstdout +fi + +cygpath_u="cygpath -u -f -" +if test "$depmode" = msvcmsys; then + # This is just like msvisualcpp but w/o cygpath translation. + # Just convert the backslash-escaped backslashes to single forward + # slashes to satisfy depend.m4 + cygpath_u='sed s,\\\\,/,g' + depmode=msvisualcpp +fi + +if test "$depmode" = msvc7msys; then + # This is just like msvc7 but w/o cygpath translation. + # Just convert the backslash-escaped backslashes to single forward + # slashes to satisfy depend.m4 + cygpath_u='sed s,\\\\,/,g' + depmode=msvc7 +fi + +case "$depmode" in +gcc3) +## gcc 3 implements dependency tracking that does exactly what +## we want. Yay! Note: for some reason libtool 1.4 doesn't like +## it if -MD -MP comes after the -MF stuff. Hmm. +## Unfortunately, FreeBSD c89 acceptance of flags depends upon +## the command line argument order; so add the flags where they +## appear in depend2.am. Note that the slowdown incurred here +## affects only configure: in makefiles, %FASTDEP% shortcuts this. + for arg + do + case $arg in + -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;; + *) set fnord "$@" "$arg" ;; + esac + shift # fnord + shift # $arg + done + "$@" + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + mv "$tmpdepfile" "$depfile" + ;; + +gcc) +## There are various ways to get dependency output from gcc. Here's +## why we pick this rather obscure method: +## - Don't want to use -MD because we'd like the dependencies to end +## up in a subdir. Having to rename by hand is ugly. +## (We might end up doing this anyway to support other compilers.) +## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like +## -MM, not -M (despite what the docs say). +## - Using -M directly means running the compiler twice (even worse +## than renaming). + if test -z "$gccflag"; then + gccflag=-MD, + fi + "$@" -Wp,"$gccflag$tmpdepfile" + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + echo "$object : \\" > "$depfile" + alpha=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz +## The second -e expression handles DOS-style file names with drive letters. + sed -e 's/^[^:]*: / /' \ + -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile" +## This next piece of magic avoids the `deleted header file' problem. +## The problem is that when a header file which appears in a .P file +## is deleted, the dependency causes make to die (because there is +## typically no way to rebuild the header). We avoid this by adding +## dummy dependencies for each header file. Too bad gcc doesn't do +## this for us directly. + tr ' ' ' +' < "$tmpdepfile" | +## Some versions of gcc put a space before the `:'. 
On the theory +## that the space means something, we add a space to the output as +## well. hp depmode also adds that space, but also prefixes the VPATH +## to the object. Take care to not repeat it in the output. +## Some versions of the HPUX 10.20 sed can't process this invocation +## correctly. Breaking it into two sed invocations is a workaround. + sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \ + | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +hp) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. + exit 1 + ;; + +sgi) + if test "$libtool" = yes; then + "$@" "-Wp,-MDupdate,$tmpdepfile" + else + "$@" -MDupdate "$tmpdepfile" + fi + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + + if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files + echo "$object : \\" > "$depfile" + + # Clip off the initial element (the dependent). Don't try to be + # clever and replace this with sed code, as IRIX sed won't handle + # lines with more than a fixed number of characters (4096 in + # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines; + # the IRIX cc adds comments like `#:fec' to the end of the + # dependency line. + tr ' ' ' +' < "$tmpdepfile" \ + | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' | \ + tr ' +' ' ' >> "$depfile" + echo >> "$depfile" + + # The second pass generates a dummy entry for each header file. + tr ' ' ' +' < "$tmpdepfile" \ + | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \ + >> "$depfile" + else + # The sourcefile does not contain any dependencies, so just + # store a dummy comment line, to avoid errors with the Makefile + # "include basename.Plo" scheme. + echo "#dummy" > "$depfile" + fi + rm -f "$tmpdepfile" + ;; + +aix) + # The C for AIX Compiler uses -M and outputs the dependencies + # in a .u file. In older versions, this file always lives in the + # current directory. Also, the AIX compiler puts `$object:' at the + # start of each line; $object doesn't have directory information. + # Version 6 uses the directory in both cases. + dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` + test "x$dir" = "x$object" && dir= + base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` + if test "$libtool" = yes; then + tmpdepfile1=$dir$base.u + tmpdepfile2=$base.u + tmpdepfile3=$dir.libs/$base.u + "$@" -Wc,-M + else + tmpdepfile1=$dir$base.u + tmpdepfile2=$dir$base.u + tmpdepfile3=$dir$base.u + "$@" -M + fi + stat=$? + + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + do + test -f "$tmpdepfile" && break + done + if test -f "$tmpdepfile"; then + # Each line is of the form `foo.o: dependent.h'. + # Do two passes, one to just change these to + # `$object: dependent.h' and one to simply `dependent.h:'. + sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile" + # That's a tab and a space in the []. + sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile" + else + # The sourcefile does not contain any dependencies, so just + # store a dummy comment line, to avoid errors with the Makefile + # "include basename.Plo" scheme. + echo "#dummy" > "$depfile" + fi + rm -f "$tmpdepfile" + ;; + +icc) + # Intel's C compiler understands `-MD -MF file'. 
However on + # icc -MD -MF foo.d -c -o sub/foo.o sub/foo.c + # ICC 7.0 will fill foo.d with something like + # foo.o: sub/foo.c + # foo.o: sub/foo.h + # which is wrong. We want: + # sub/foo.o: sub/foo.c + # sub/foo.o: sub/foo.h + # sub/foo.c: + # sub/foo.h: + # ICC 7.1 will output + # foo.o: sub/foo.c sub/foo.h + # and will wrap long lines using \ : + # foo.o: sub/foo.c ... \ + # sub/foo.h ... \ + # ... + + "$@" -MD -MF "$tmpdepfile" + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + # Each line is of the form `foo.o: dependent.h', + # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'. + # Do two passes, one to just change these to + # `$object: dependent.h' and one to simply `dependent.h:'. + sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile" + # Some versions of the HPUX 10.20 sed can't process this invocation + # correctly. Breaking it into two sed invocations is a workaround. + sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" | + sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +hp2) + # The "hp" stanza above does not work with aCC (C++) and HP's ia64 + # compilers, which have integrated preprocessors. The correct option + # to use with these is +Maked; it writes dependencies to a file named + # 'foo.d', which lands next to the object file, wherever that + # happens to be. + # Much of this is similar to the tru64 case; see comments there. + dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` + test "x$dir" = "x$object" && dir= + base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` + if test "$libtool" = yes; then + tmpdepfile1=$dir$base.d + tmpdepfile2=$dir.libs/$base.d + "$@" -Wc,+Maked + else + tmpdepfile1=$dir$base.d + tmpdepfile2=$dir$base.d + "$@" +Maked + fi + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile1" "$tmpdepfile2" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" + do + test -f "$tmpdepfile" && break + done + if test -f "$tmpdepfile"; then + sed -e "s,^.*\.[a-z]*:,$object:," "$tmpdepfile" > "$depfile" + # Add `dependent.h:' lines. + sed -ne '2,${ + s/^ *// + s/ \\*$// + s/$/:/ + p + }' "$tmpdepfile" >> "$depfile" + else + echo "#dummy" > "$depfile" + fi + rm -f "$tmpdepfile" "$tmpdepfile2" + ;; + +tru64) + # The Tru64 compiler uses -MD to generate dependencies as a side + # effect. `cc -MD -o foo.o ...' puts the dependencies into `foo.o.d'. + # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put + # dependencies in `foo.d' instead, so we check for that too. + # Subdirectories are respected. + dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` + test "x$dir" = "x$object" && dir= + base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` + + if test "$libtool" = yes; then + # With Tru64 cc, shared objects can also be used to make a + # static library. This mechanism is used in libtool 1.4 series to + # handle both shared and static libraries in a single compilation. + # With libtool 1.4, dependencies were output in $dir.libs/$base.lo.d. + # + # With libtool 1.5 this exception was removed, and libtool now + # generates 2 separate objects for the 2 libraries. These two + # compilations output dependencies in $dir.libs/$base.o.d and + # in $dir$base.o.d. We have to check for both files, because + # one of the two compilations can be disabled. 
We should prefer + # $dir$base.o.d over $dir.libs/$base.o.d because the latter is + # automatically cleaned when .libs/ is deleted, while ignoring + # the former would cause a distcleancheck panic. + tmpdepfile1=$dir.libs/$base.lo.d # libtool 1.4 + tmpdepfile2=$dir$base.o.d # libtool 1.5 + tmpdepfile3=$dir.libs/$base.o.d # libtool 1.5 + tmpdepfile4=$dir.libs/$base.d # Compaq CCC V6.2-504 + "$@" -Wc,-MD + else + tmpdepfile1=$dir$base.o.d + tmpdepfile2=$dir$base.d + tmpdepfile3=$dir$base.d + tmpdepfile4=$dir$base.d + "$@" -MD + fi + + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4" + do + test -f "$tmpdepfile" && break + done + if test -f "$tmpdepfile"; then + sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile" + # That's a tab and a space in the []. + sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile" + else + echo "#dummy" > "$depfile" + fi + rm -f "$tmpdepfile" + ;; + +msvc7) + if test "$libtool" = yes; then + showIncludes=-Wc,-showIncludes + else + showIncludes=-showIncludes + fi + "$@" $showIncludes > "$tmpdepfile" + stat=$? + grep -v '^Note: including file: ' "$tmpdepfile" + if test "$stat" = 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + echo "$object : \\" > "$depfile" + # The first sed program below extracts the file names and escapes + # backslashes for cygpath. The second sed program outputs the file + # name when reading, but also accumulates all include files in the + # hold buffer in order to output them again at the end. This only + # works with sed implementations that can handle large buffers. + sed < "$tmpdepfile" -n ' +/^Note: including file: *\(.*\)/ { + s//\1/ + s/\\/\\\\/g + p +}' | $cygpath_u | sort -u | sed -n ' +s/ /\\ /g +s/\(.*\)/ \1 \\/p +s/.\(.*\) \\/\1:/ +H +$ { + s/.*/ / + G + p +}' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +msvc7msys) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. + exit 1 + ;; + +#nosideeffect) + # This comment above is used by automake to tell side-effect + # dependency tracking mechanisms from slower ones. + +dashmstdout) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout, regardless of -o. + "$@" || exit $? + + # Remove the call to Libtool. + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + + # Remove `-o $object'. + IFS=" " + for arg + do + case $arg in + -o) + shift + ;; + $object) + shift + ;; + *) + set fnord "$@" "$arg" + shift # fnord + shift # $arg + ;; + esac + done + + test -z "$dashmflag" && dashmflag=-M + # Require at least two characters before searching for `:' + # in the target name. This is to cope with DOS-style filenames: + # a dependency such as `c:/foo/bar' could be seen as target `c' otherwise. + "$@" $dashmflag | + sed 's:^[ ]*[^: ][^:][^:]*\:[ ]*:'"$object"'\: :' > "$tmpdepfile" + rm -f "$depfile" + cat < "$tmpdepfile" > "$depfile" + tr ' ' ' +' < "$tmpdepfile" | \ +## Some versions of the HPUX 10.20 sed can't process this invocation +## correctly. Breaking it into two sed invocations is a workaround. 
+ sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +dashXmstdout) + # This case only exists to satisfy depend.m4. It is never actually + # run, as this mode is specially recognized in the preamble. + exit 1 + ;; + +makedepend) + "$@" || exit $? + # Remove any Libtool call + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + # X makedepend + shift + cleared=no eat=no + for arg + do + case $cleared in + no) + set ""; shift + cleared=yes ;; + esac + if test $eat = yes; then + eat=no + continue + fi + case "$arg" in + -D*|-I*) + set fnord "$@" "$arg"; shift ;; + # Strip any option that makedepend may not understand. Remove + # the object too, otherwise makedepend will parse it as a source file. + -arch) + eat=yes ;; + -*|$object) + ;; + *) + set fnord "$@" "$arg"; shift ;; + esac + done + obj_suffix=`echo "$object" | sed 's/^.*\././'` + touch "$tmpdepfile" + ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@" + rm -f "$depfile" + # makedepend may prepend the VPATH from the source file name to the object. + # No need to regex-escape $object, excess matching of '.' is harmless. + sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile" + sed '1,2d' "$tmpdepfile" | tr ' ' ' +' | \ +## Some versions of the HPUX 10.20 sed can't process this invocation +## correctly. Breaking it into two sed invocations is a workaround. + sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" "$tmpdepfile".bak + ;; + +cpp) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout. + "$@" || exit $? + + # Remove the call to Libtool. + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + + # Remove `-o $object'. + IFS=" " + for arg + do + case $arg in + -o) + shift + ;; + $object) + shift + ;; + *) + set fnord "$@" "$arg" + shift # fnord + shift # $arg + ;; + esac + done + + "$@" -E | + sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ + -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' | + sed '$ s: \\$::' > "$tmpdepfile" + rm -f "$depfile" + echo "$object : \\" > "$depfile" + cat < "$tmpdepfile" >> "$depfile" + sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +msvisualcpp) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout. + "$@" || exit $? + + # Remove the call to Libtool. + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + + IFS=" " + for arg + do + case "$arg" in + -o) + shift + ;; + $object) + shift + ;; + "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI") + set fnord "$@" + shift + shift + ;; + *) + set fnord "$@" "$arg" + shift + shift + ;; + esac + done + "$@" -E 2>/dev/null | + sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile" + rm -f "$depfile" + echo "$object : \\" > "$depfile" + sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s:: \1 \\:p' >> "$depfile" + echo " " >> "$depfile" + sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +msvcmsys) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. 
+ exit 1 + ;; + +none) + exec "$@" + ;; + +*) + echo "Unknown depmode $depmode" 1>&2 + exit 1 + ;; +esac + +exit 0 + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/Zoltan-3.90/config/generate-makeoptions.pl b/Zoltan-3.90/config/generate-makeoptions.pl new file mode 100755 index 00000000..fe466960 --- /dev/null +++ b/Zoltan-3.90/config/generate-makeoptions.pl @@ -0,0 +1,88 @@ +#!/usr/bin/perl -w +# +# This perl script graps a bunch of make macro definitions +# generated for Teuchos that can be used in other makefiles. +# This is dumped to stdout and can be redirected to build +# a makefile. +# +# Note, this script must be maintained to be current for +# the Teuchos makefile. +# +use strict; + +if( !((@ARGV) && scalar(@ARGV)==2) ) { + die "Error, this script takes two and only two arguments (makefile_name package_name).!\n"; +} + +my $makefile_name = shift; +my $package_name = shift; + +# +# List the macros you want to grep and include in the output +# +my @macros = + ( + "CC" + ,"CXX" + ,"F77" + ,"FC" + ,"CXXLD" + ,"DEFS" + ,"CPPFLAGS" + ,"CFLAGS" + ,"CXXFLAGS" + ,"FFLAGS" + ,"FCFLAGS" + ,"LDFLAGS" + ,"FLIBS" + ,"BLAS_LIBS" + ,"LAPACK_LIBS" + ,"prefix" + ,"AR" + ,"ALTERNATE_AR" + ,"libteuchos_a_AR" + ,"RANLIB" + ); + +open FILE_IN, "<$makefile_name" || die "The file $makefile_name could not be opended for input\n"; +my @makefile_name_array = ; +close FILE_IN; + +# +# Find the above macros and append "${package_name}_" to the beginning. +# +my @new_macros; +my $add_next_line = 0; +foreach( @makefile_name_array ) { + my $line = $_; + if($add_next_line) { + push @new_macros, $line; + if( substr($line,-1,1) eq "\\" ) { + $add_next_line = 1; + } + else { + $add_next_line = 0; + } + next; + } + #print "Line = $line"; + foreach( @macros ) { + my $macro_search = "^${_} "; + #print "Macro search = \'$macro_search\'\n"; + if( $line=~/$macro_search/ ) { + #print "Adding Macro!\n"; + my $find_str = '\(CXX\)'; + my $replace_str = "(${package_name}_CXX)"; + $line=~s/$find_str/$replace_str/; + push @new_macros, "${package_name}_${line}"; + if( substr($line,-2,1) eq "\\" ) { + $add_next_line = 1; + } + else { + $add_next_line = 0; + } + } + } +} + +print join("",@new_macros); diff --git a/Zoltan-3.90/config/install-sh b/Zoltan-3.90/config/install-sh new file mode 100755 index 00000000..a9244eb0 --- /dev/null +++ b/Zoltan-3.90/config/install-sh @@ -0,0 +1,527 @@ +#!/bin/sh +# install - install a program, script, or datafile + +scriptversion=2011-01-19.21; # UTC + +# This originates from X11R5 (mit/util/scripts/install.sh), which was +# later released in X11R6 (xc/config/util/install.sh) with the +# following copyright and license. +# +# Copyright (C) 1994 X Consortium +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN +# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- +# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# +# Except as contained in this notice, the name of the X Consortium shall not +# be used in advertising or otherwise to promote the sale, use or other deal- +# ings in this Software without prior written authorization from the X Consor- +# tium. +# +# +# FSF changes to this file are in the public domain. +# +# Calling this script install-sh is preferred over install.sh, to prevent +# `make' implicit rules from creating a file called install from it +# when there is no Makefile. +# +# This script is compatible with the BSD install script, but was written +# from scratch. + +nl=' +' +IFS=" "" $nl" + +# set DOITPROG to echo to test this script + +# Don't use :- since 4.3BSD and earlier shells don't like it. +doit=${DOITPROG-} +if test -z "$doit"; then + doit_exec=exec +else + doit_exec=$doit +fi + +# Put in absolute file names if you don't have them in your path; +# or use environment vars. + +chgrpprog=${CHGRPPROG-chgrp} +chmodprog=${CHMODPROG-chmod} +chownprog=${CHOWNPROG-chown} +cmpprog=${CMPPROG-cmp} +cpprog=${CPPROG-cp} +mkdirprog=${MKDIRPROG-mkdir} +mvprog=${MVPROG-mv} +rmprog=${RMPROG-rm} +stripprog=${STRIPPROG-strip} + +posix_glob='?' +initialize_posix_glob=' + test "$posix_glob" != "?" || { + if (set -f) 2>/dev/null; then + posix_glob= + else + posix_glob=: + fi + } +' + +posix_mkdir= + +# Desired mode of installed file. +mode=0755 + +chgrpcmd= +chmodcmd=$chmodprog +chowncmd= +mvcmd=$mvprog +rmcmd="$rmprog -f" +stripcmd= + +src= +dst= +dir_arg= +dst_arg= + +copy_on_change=false +no_target_directory= + +usage="\ +Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE + or: $0 [OPTION]... SRCFILES... DIRECTORY + or: $0 [OPTION]... -t DIRECTORY SRCFILES... + or: $0 [OPTION]... -d DIRECTORIES... + +In the 1st form, copy SRCFILE to DSTFILE. +In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. +In the 4th, create DIRECTORIES. + +Options: + --help display this help and exit. + --version display version info and exit. + + -c (ignored) + -C install only if different (preserve the last data modification time) + -d create directories instead of installing files. + -g GROUP $chgrpprog installed files to GROUP. + -m MODE $chmodprog installed files to MODE. + -o USER $chownprog installed files to USER. + -s $stripprog installed files. + -t DIRECTORY install into DIRECTORY. + -T report an error if DSTFILE is a directory. + +Environment variables override the default commands: + CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG + RMPROG STRIPPROG +" + +while test $# -ne 0; do + case $1 in + -c) ;; + + -C) copy_on_change=true;; + + -d) dir_arg=true;; + + -g) chgrpcmd="$chgrpprog $2" + shift;; + + --help) echo "$usage"; exit $?;; + + -m) mode=$2 + case $mode in + *' '* | *' '* | *' +'* | *'*'* | *'?'* | *'['*) + echo "$0: invalid mode: $mode" >&2 + exit 1;; + esac + shift;; + + -o) chowncmd="$chownprog $2" + shift;; + + -s) stripcmd=$stripprog;; + + -t) dst_arg=$2 + # Protect names problematic for `test' and other utilities. 
+ case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + shift;; + + -T) no_target_directory=true;; + + --version) echo "$0 $scriptversion"; exit $?;; + + --) shift + break;; + + -*) echo "$0: invalid option: $1" >&2 + exit 1;; + + *) break;; + esac + shift +done + +if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then + # When -d is used, all remaining arguments are directories to create. + # When -t is used, the destination is already specified. + # Otherwise, the last argument is the destination. Remove it from $@. + for arg + do + if test -n "$dst_arg"; then + # $@ is not empty: it contains at least $arg. + set fnord "$@" "$dst_arg" + shift # fnord + fi + shift # arg + dst_arg=$arg + # Protect names problematic for `test' and other utilities. + case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + done +fi + +if test $# -eq 0; then + if test -z "$dir_arg"; then + echo "$0: no input file specified." >&2 + exit 1 + fi + # It's OK to call `install-sh -d' without argument. + # This can happen when creating conditional directories. + exit 0 +fi + +if test -z "$dir_arg"; then + do_exit='(exit $ret); exit $ret' + trap "ret=129; $do_exit" 1 + trap "ret=130; $do_exit" 2 + trap "ret=141; $do_exit" 13 + trap "ret=143; $do_exit" 15 + + # Set umask so as not to create temps with too-generous modes. + # However, 'strip' requires both read and write access to temps. + case $mode in + # Optimize common cases. + *644) cp_umask=133;; + *755) cp_umask=22;; + + *[0-7]) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw='% 200' + fi + cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; + *) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw=,u+rw + fi + cp_umask=$mode$u_plus_rw;; + esac +fi + +for src +do + # Protect names problematic for `test' and other utilities. + case $src in + -* | [=\(\)!]) src=./$src;; + esac + + if test -n "$dir_arg"; then + dst=$src + dstdir=$dst + test -d "$dstdir" + dstdir_status=$? + else + + # Waiting for this to be detected by the "$cpprog $src $dsttmp" command + # might cause directories to be created, which would be especially bad + # if $src (and thus $dsttmp) contains '*'. + if test ! -f "$src" && test ! -d "$src"; then + echo "$0: $src does not exist." >&2 + exit 1 + fi + + if test -z "$dst_arg"; then + echo "$0: no destination specified." >&2 + exit 1 + fi + dst=$dst_arg + + # If destination is a directory, append the input filename; won't work + # if double slashes aren't ignored. + if test -d "$dst"; then + if test -n "$no_target_directory"; then + echo "$0: $dst_arg: Is a directory" >&2 + exit 1 + fi + dstdir=$dst + dst=$dstdir/`basename "$src"` + dstdir_status=0 + else + # Prefer dirname, but fall back on a substitute if dirname fails. + dstdir=` + (dirname "$dst") 2>/dev/null || + expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$dst" : 'X\(//\)[^/]' \| \ + X"$dst" : 'X\(//\)$' \| \ + X"$dst" : 'X\(/\)' \| . 2>/dev/null || + echo X"$dst" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q' + ` + + test -d "$dstdir" + dstdir_status=$? + fi + fi + + obsolete_mkdir_used=false + + if test $dstdir_status != 0; then + case $posix_mkdir in + '') + # Create intermediate dirs using mode 755 as modified by the umask. + # This is like FreeBSD 'install' as of 1997-10-28. + umask=`umask` + case $stripcmd.$umask in + # Optimize common cases. 
+ *[2367][2367]) mkdir_umask=$umask;; + .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; + + *[0-7]) + mkdir_umask=`expr $umask + 22 \ + - $umask % 100 % 40 + $umask % 20 \ + - $umask % 10 % 4 + $umask % 2 + `;; + *) mkdir_umask=$umask,go-w;; + esac + + # With -d, create the new directory with the user-specified mode. + # Otherwise, rely on $mkdir_umask. + if test -n "$dir_arg"; then + mkdir_mode=-m$mode + else + mkdir_mode= + fi + + posix_mkdir=false + case $umask in + *[123567][0-7][0-7]) + # POSIX mkdir -p sets u+wx bits regardless of umask, which + # is incompatible with FreeBSD 'install' when (umask & 300) != 0. + ;; + *) + tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ + trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 + + if (umask $mkdir_umask && + exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 + then + if test -z "$dir_arg" || { + # Check for POSIX incompatibilities with -m. + # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or + # other-writeable bit of parent directory when it shouldn't. + # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. + ls_ld_tmpdir=`ls -ld "$tmpdir"` + case $ls_ld_tmpdir in + d????-?r-*) different_mode=700;; + d????-?--*) different_mode=755;; + *) false;; + esac && + $mkdirprog -m$different_mode -p -- "$tmpdir" && { + ls_ld_tmpdir_1=`ls -ld "$tmpdir"` + test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" + } + } + then posix_mkdir=: + fi + rmdir "$tmpdir/d" "$tmpdir" + else + # Remove any dirs left behind by ancient mkdir implementations. + rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null + fi + trap '' 0;; + esac;; + esac + + if + $posix_mkdir && ( + umask $mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" + ) + then : + else + + # The umask is ridiculous, or mkdir does not conform to POSIX, + # or it failed possibly due to a race condition. Create the + # directory the slow way, step by step, checking for races as we go. + + case $dstdir in + /*) prefix='/';; + [-=\(\)!]*) prefix='./';; + *) prefix='';; + esac + + eval "$initialize_posix_glob" + + oIFS=$IFS + IFS=/ + $posix_glob set -f + set fnord $dstdir + shift + $posix_glob set +f + IFS=$oIFS + + prefixes= + + for d + do + test X"$d" = X && continue + + prefix=$prefix$d + if test -d "$prefix"; then + prefixes= + else + if $posix_mkdir; then + (umask=$mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break + # Don't fail if two instances are running concurrently. + test -d "$prefix" || exit 1 + else + case $prefix in + *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; + *) qprefix=$prefix;; + esac + prefixes="$prefixes '$qprefix'" + fi + fi + prefix=$prefix/ + done + + if test -n "$prefixes"; then + # Don't fail if two instances are running concurrently. + (umask $mkdir_umask && + eval "\$doit_exec \$mkdirprog $prefixes") || + test -d "$dstdir" || exit 1 + obsolete_mkdir_used=true + fi + fi + fi + + if test -n "$dir_arg"; then + { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && + { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || + test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 + else + + # Make a couple of temp file names in the proper directory. + dsttmp=$dstdir/_inst.$$_ + rmtmp=$dstdir/_rm.$$_ + + # Trap to clean up those temp files at exit. + trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 + + # Copy the file name to the temp name. 
+ (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && + + # and set any options; do chmod last to preserve setuid bits. + # + # If any of these fail, we abort the whole thing. If we want to + # ignore errors from any of these, just make sure not to ignore + # errors from the above "$doit $cpprog $src $dsttmp" command. + # + { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && + { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && + { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && + + # If -C, don't bother to copy if it wouldn't change the file. + if $copy_on_change && + old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && + new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && + + eval "$initialize_posix_glob" && + $posix_glob set -f && + set X $old && old=:$2:$4:$5:$6 && + set X $new && new=:$2:$4:$5:$6 && + $posix_glob set +f && + + test "$old" = "$new" && + $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 + then + rm -f "$dsttmp" + else + # Rename the file to the real destination. + $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || + + # The rename failed, perhaps because mv can't rename something else + # to itself, or perhaps because mv is so ancient that it does not + # support -f. + { + # Now remove or move aside any old file at destination location. + # We try this two ways since rm can't unlink itself on some + # systems and the destination file might be busy for other + # reasons. In this case, the final cleanup might fail but the new + # file should still install successfully. + { + test ! -f "$dst" || + $doit $rmcmd -f "$dst" 2>/dev/null || + { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && + { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } + } || + { echo "$0: cannot unlink or rename $dst" >&2 + (exit 1); exit 1 + } + } && + + # Now rename the file to the real destination. + $doit $mvcmd "$dsttmp" "$dst" + } + fi || exit 1 + + trap '' 0 + fi +done + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/Zoltan-3.90/config/missing b/Zoltan-3.90/config/missing new file mode 100755 index 00000000..86a8fc31 --- /dev/null +++ b/Zoltan-3.90/config/missing @@ -0,0 +1,331 @@ +#! /bin/sh +# Common stub for a few missing GNU programs while installing. + +scriptversion=2012-01-06.13; # UTC + +# Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004, 2005, 2006, +# 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc. +# Originally by Fran,cois Pinard , 1996. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. 
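+
+# Illustrative invocations (the tool names below are only examples; this stub is
+# normally called from generated Makefile rules, but it can also be run by hand):
+#
+#   ./config/missing --run autoconf   # run autoconf if present, else warn and touch 'configure'
+#   ./config/missing aclocal          # print the standard warning and touch 'aclocal.m4'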
+ +if test $# -eq 0; then + echo 1>&2 "Try \`$0 --help' for more information" + exit 1 +fi + +run=: +sed_output='s/.* --output[ =]\([^ ]*\).*/\1/p' +sed_minuso='s/.* -o \([^ ]*\).*/\1/p' + +# In the cases where this matters, `missing' is being run in the +# srcdir already. +if test -f configure.ac; then + configure_ac=configure.ac +else + configure_ac=configure.in +fi + +msg="missing on your system" + +case $1 in +--run) + # Try to run requested program, and just exit if it succeeds. + run= + shift + "$@" && exit 0 + # Exit code 63 means version mismatch. This often happens + # when the user try to use an ancient version of a tool on + # a file that requires a minimum version. In this case we + # we should proceed has if the program had been absent, or + # if --run hadn't been passed. + if test $? = 63; then + run=: + msg="probably too old" + fi + ;; + + -h|--h|--he|--hel|--help) + echo "\ +$0 [OPTION]... PROGRAM [ARGUMENT]... + +Handle \`PROGRAM [ARGUMENT]...' for when PROGRAM is missing, or return an +error status if there is no known handling for PROGRAM. + +Options: + -h, --help display this help and exit + -v, --version output version information and exit + --run try to run the given command, and emulate it if it fails + +Supported PROGRAM values: + aclocal touch file \`aclocal.m4' + autoconf touch file \`configure' + autoheader touch file \`config.h.in' + autom4te touch the output file, or create a stub one + automake touch all \`Makefile.in' files + bison create \`y.tab.[ch]', if possible, from existing .[ch] + flex create \`lex.yy.c', if possible, from existing .c + help2man touch the output file + lex create \`lex.yy.c', if possible, from existing .c + makeinfo touch the output file + yacc create \`y.tab.[ch]', if possible, from existing .[ch] + +Version suffixes to PROGRAM as well as the prefixes \`gnu-', \`gnu', and +\`g' are ignored when checking the name. + +Send bug reports to ." + exit $? + ;; + + -v|--v|--ve|--ver|--vers|--versi|--versio|--version) + echo "missing $scriptversion (GNU Automake)" + exit $? + ;; + + -*) + echo 1>&2 "$0: Unknown \`$1' option" + echo 1>&2 "Try \`$0 --help' for more information" + exit 1 + ;; + +esac + +# normalize program name to check for. +program=`echo "$1" | sed ' + s/^gnu-//; t + s/^gnu//; t + s/^g//; t'` + +# Now exit if we have it, but it failed. Also exit now if we +# don't have it and --version was passed (most likely to detect +# the program). This is about non-GNU programs, so use $1 not +# $program. +case $1 in + lex*|yacc*) + # Not GNU programs, they don't have --version. + ;; + + *) + if test -z "$run" && ($1 --version) > /dev/null 2>&1; then + # We have it, but it failed. + exit 1 + elif test "x$2" = "x--version" || test "x$2" = "x--help"; then + # Could not run --version or --help. This is probably someone + # running `$TOOL --version' or `$TOOL --help' to check whether + # $TOOL exists and not knowing $TOOL uses missing. + exit 1 + fi + ;; +esac + +# If it does not exist, or fails to run (possibly an outdated version), +# try to emulate it. +case $program in + aclocal*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`acinclude.m4' or \`${configure_ac}'. You might want + to install the \`Automake' and \`Perl' packages. Grab them from + any GNU archive site." + touch aclocal.m4 + ;; + + autoconf*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`${configure_ac}'. You might want to install the + \`Autoconf' and \`GNU m4' packages. 
Grab them from any GNU + archive site." + touch configure + ;; + + autoheader*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`acconfig.h' or \`${configure_ac}'. You might want + to install the \`Autoconf' and \`GNU m4' packages. Grab them + from any GNU archive site." + files=`sed -n 's/^[ ]*A[CM]_CONFIG_HEADER(\([^)]*\)).*/\1/p' ${configure_ac}` + test -z "$files" && files="config.h" + touch_files= + for f in $files; do + case $f in + *:*) touch_files="$touch_files "`echo "$f" | + sed -e 's/^[^:]*://' -e 's/:.*//'`;; + *) touch_files="$touch_files $f.in";; + esac + done + touch $touch_files + ;; + + automake*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`Makefile.am', \`acinclude.m4' or \`${configure_ac}'. + You might want to install the \`Automake' and \`Perl' packages. + Grab them from any GNU archive site." + find . -type f -name Makefile.am -print | + sed 's/\.am$/.in/' | + while read f; do touch "$f"; done + ;; + + autom4te*) + echo 1>&2 "\ +WARNING: \`$1' is needed, but is $msg. + You might have modified some files without having the + proper tools for further handling them. + You can get \`$1' as part of \`Autoconf' from any GNU + archive site." + + file=`echo "$*" | sed -n "$sed_output"` + test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` + if test -f "$file"; then + touch $file + else + test -z "$file" || exec >$file + echo "#! /bin/sh" + echo "# Created by GNU Automake missing as a replacement of" + echo "# $ $@" + echo "exit 0" + chmod +x $file + exit 1 + fi + ;; + + bison*|yacc*) + echo 1>&2 "\ +WARNING: \`$1' $msg. You should only need it if + you modified a \`.y' file. You may need the \`Bison' package + in order for those modifications to take effect. You can get + \`Bison' from any GNU archive site." + rm -f y.tab.c y.tab.h + if test $# -ne 1; then + eval LASTARG=\${$#} + case $LASTARG in + *.y) + SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'` + if test -f "$SRCFILE"; then + cp "$SRCFILE" y.tab.c + fi + SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'` + if test -f "$SRCFILE"; then + cp "$SRCFILE" y.tab.h + fi + ;; + esac + fi + if test ! -f y.tab.h; then + echo >y.tab.h + fi + if test ! -f y.tab.c; then + echo 'main() { return 0; }' >y.tab.c + fi + ;; + + lex*|flex*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified a \`.l' file. You may need the \`Flex' package + in order for those modifications to take effect. You can get + \`Flex' from any GNU archive site." + rm -f lex.yy.c + if test $# -ne 1; then + eval LASTARG=\${$#} + case $LASTARG in + *.l) + SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'` + if test -f "$SRCFILE"; then + cp "$SRCFILE" lex.yy.c + fi + ;; + esac + fi + if test ! -f lex.yy.c; then + echo 'main() { return 0; }' >lex.yy.c + fi + ;; + + help2man*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified a dependency of a manual page. You may need the + \`Help2man' package in order for those modifications to take + effect. You can get \`Help2man' from any GNU archive site." + + file=`echo "$*" | sed -n "$sed_output"` + test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` + if test -f "$file"; then + touch $file + else + test -z "$file" || exec >$file + echo ".ab help2man is required to generate this page" + exit $? + fi + ;; + + makeinfo*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified a \`.texi' or \`.texinfo' file, or any other file + indirectly affecting the aspect of the manual. 
The spurious + call might also be the consequence of using a buggy \`make' (AIX, + DU, IRIX). You might want to install the \`Texinfo' package or + the \`GNU make' package. Grab either from any GNU archive site." + # The file to touch is that specified with -o ... + file=`echo "$*" | sed -n "$sed_output"` + test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` + if test -z "$file"; then + # ... or it is the one specified with @setfilename ... + infile=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'` + file=`sed -n ' + /^@setfilename/{ + s/.* \([^ ]*\) *$/\1/ + p + q + }' $infile` + # ... or it is derived from the source name (dir/f.texi becomes f.info) + test -z "$file" && file=`echo "$infile" | sed 's,.*/,,;s,.[^.]*$,,'`.info + fi + # If the file does not exist, the user really needs makeinfo; + # let's fail without touching anything. + test -f $file || exit 1 + touch $file + ;; + + *) + echo 1>&2 "\ +WARNING: \`$1' is needed, and is $msg. + You might have modified some files without having the + proper tools for further handling them. Check the \`README' file, + it often tells you about the needed prerequisites for installing + this package. You may also peek at any GNU archive site, in case + some other package would contain this missing \`$1' program." + exit 1 + ;; +esac + +exit 0 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/Zoltan-3.90/config/replace-install-prefix.pl b/Zoltan-3.90/config/replace-install-prefix.pl new file mode 100755 index 00000000..8a9fb355 --- /dev/null +++ b/Zoltan-3.90/config/replace-install-prefix.pl @@ -0,0 +1,89 @@ +#!/usr/bin/perl -w +use strict; +use Getopt::Long; +# +# This script is called to do a set of text replacements for installing +# a Makefile.export.package file so that external clients can use it. +# +# Read in commandline arguments +# +my $exec_prefix = ""; # [required] Abs path to base installation directory (i.e. --prefix=??? 
option passed to configure) +my $my_export_makefile = ""; # [required] Name only of installed Makefile.export.package file +my $my_top_srcdir = ""; # [required] Abs path to this package's top source directory +my $my_incl_dirs = ""; # [required] Abs path to this package's include directories +my $my_lib_dirs = ""; # [optional] Abs path to this package's library directories (if any exist) +my $dep_package_builddirs = ""; # [optional] Abs paths to other directly dependent framework package build directories (if any exist) +GetOptions( + "exec-prefix=s" => \$exec_prefix, + "my-export-makefile=s" => \$my_export_makefile, + "my-abs-top-srcdir=s" => \$my_top_srcdir, + "my-abs-incl-dirs=s" => \$my_incl_dirs, + "my-abs-lib-dirs=s" => \$my_lib_dirs, + "dep-package-abs-builddirs=s" => \$dep_package_builddirs + ); +# +# Validate commandline arguments +# +scalar(@ARGV) == 0 || die; +$exec_prefix ne "" || die; +$my_export_makefile ne "" || die; +$my_top_srcdir ne "" || die; +$my_incl_dirs ne "" || die; +# +# Interpret commandline arguments +# +$exec_prefix = remove_rel_paths($exec_prefix); +my @my_incl_dirs = split(":",$my_incl_dirs); +my @my_lib_dirs = split(":",$my_lib_dirs); +my @dep_export_package_builddirs = split(":",$dep_package_builddirs); +# +# Do the replacements +# +my $my_abs_export_makefile = "${exec_prefix}/include/${my_export_makefile}"; + +my $cmnd_base = "${my_top_srcdir}/config/token-replace.pl "; +# +foreach(@dep_export_package_builddirs) { + if($_ ne "") { + run_cmnd($cmnd_base . "${_} ${exec_prefix}/include ${my_abs_export_makefile} ${my_abs_export_makefile}"); + } +} +# +foreach(@my_incl_dirs) { + if($_ ne "") { + run_cmnd($cmnd_base . "-I${_} -I${exec_prefix}/include ${my_abs_export_makefile} ${my_abs_export_makefile}"); + } +} +# +foreach(@my_lib_dirs) { + if($_ ne "") { + run_cmnd($cmnd_base . "-L${_} -L${exec_prefix}/lib ${my_abs_export_makefile} ${my_abs_export_makefile}"); + } +} +# +run_cmnd($cmnd_base . "${my_top_srcdir}/config ${exec_prefix}/include ${my_abs_export_makefile} ${my_abs_export_makefile}"); +# +# Subroutines +# +sub remove_rel_paths { + my $entry_in = shift; + if ($entry_in=~/-L\.\./) { + return $entry_in; + } + my @paths = split("/",$entry_in); + my @new_paths; + foreach( @paths ) { + if( !($_=~/\.\./) ) { + push @new_paths, $_; + } + else { + pop @new_paths + } + } + return join("/",@new_paths); +} +sub run_cmnd { + my $cmnd = shift; + #print "\n", $cmnd, "\n"; + system($cmnd)==0 || die; +} diff --git a/Zoltan-3.90/config/string-replace.pl b/Zoltan-3.90/config/string-replace.pl new file mode 100755 index 00000000..adeb1f43 --- /dev/null +++ b/Zoltan-3.90/config/string-replace.pl @@ -0,0 +1,43 @@ +#!/usr/bin/perl -w +# +# This perl script replaces a string with another string. +# Here it is allowd for file_in and file_out to be the +# same file. 
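+#
+# Illustrative invocation (the strings and file names are placeholders only):
+#
+#   perl config/string-replace.pl OLD_TEXT NEW_TEXT Makefile.in Makefile.in
+#
+# When file_out is the same file as file_in, the file is only rewritten if a
+# replacement was actually made; files with CVS in their path are skipped.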
+# +use strict; +# +my $g_use_msg = + "Use: string-replace.pl find_string replacement_string file_in file_out\n"; +if( scalar(@ARGV) < 4 ) { + print STDERR $g_use_msg; + exit(-1); +} +# +my $find_string = shift; +my $replacement_string = shift; +my $file_in_name = shift; +my $file_out_name = shift; +# +# +if($file_in_name=~/CVS/) { +# print "Do not replace in CVS\n"; + exit; +} +# +open FILE_IN, "<$file_in_name" || die "The file $file_in_name could not be opended for input\n"; +my @file_in_array = ; +close FILE_IN; +# +my @file_out_array; +my $did_replacement = 0; +foreach(@file_in_array) { + #print $_; + $did_replacement = 1 if $_=~s/$find_string/$replacement_string/g; + #print $_; + push @file_out_array, $_; +} +if($did_replacement || $file_out_name ne $file_in_name) { + open FILE_OUT, ">$file_out_name" || die "The file $file_out_name could not be opended for output\n"; + print FILE_OUT @file_out_array; + close FILE_OUT; +} diff --git a/Zoltan-3.90/config/strip_dup_incl_paths.pl b/Zoltan-3.90/config/strip_dup_incl_paths.pl new file mode 100755 index 00000000..c628d311 --- /dev/null +++ b/Zoltan-3.90/config/strip_dup_incl_paths.pl @@ -0,0 +1,44 @@ +#!/usr/bin/perl -w +# This perl script removes duplicate include paths left to the right +use strict; +my @all_incl_paths = @ARGV; +my @cleaned_up_incl_paths; +foreach( @all_incl_paths ) { + $_ = remove_rel_paths($_); + if( !($_=~/-I/) ) { + push @cleaned_up_incl_paths, $_; + } + elsif( !entry_exists($_,\@cleaned_up_incl_paths) ) { + push @cleaned_up_incl_paths, $_; + } +} +print join( " ", @cleaned_up_incl_paths ); +# +# Subroutines +# +sub entry_exists { + my $entry = shift; # String + my $list = shift; # Reference to an array + foreach( @$list ) { + if( $entry eq $_ ) { return 1; } + } + return 0; +} +# +sub remove_rel_paths { + my $entry_in = shift; + if ($entry_in=~/-I\.\./) { + return $entry_in; + } + my @paths = split("/",$entry_in); + my @new_paths; + foreach( @paths ) { + if( !($_=~/\.\./) ) { + push @new_paths, $_; + } + else { + pop @new_paths + } + } + return join("/",@new_paths); +} diff --git a/Zoltan-3.90/config/strip_dup_libs.pl b/Zoltan-3.90/config/strip_dup_libs.pl new file mode 100755 index 00000000..cdf4b42a --- /dev/null +++ b/Zoltan-3.90/config/strip_dup_libs.pl @@ -0,0 +1,69 @@ +#!/usr/bin/perl -w +# This perl script removes duplicate libraries from the right to the left and +# removes duplicate -L library paths from the left to the right +use strict; + +my @all_libs = @ARGV; +# +# Move from left to right and remove duplicate -l libraries +# +my @cleaned_up_libs_first; +foreach( reverse @all_libs ) { + $_ = remove_rel_paths($_); + if( $_=~/-L/ ) { + unshift @cleaned_up_libs_first, $_; + } + else { + if( !entry_exists($_,\@cleaned_up_libs_first) ) { + unshift @cleaned_up_libs_first, $_; + } + } +} + +# +# Move from right to left and remove duplicate -L library paths +# +my @cleaned_up_libs; +foreach( @cleaned_up_libs_first ) { + $_ = remove_rel_paths($_); + if( !($_=~/-L/) ) { + push @cleaned_up_libs, $_; + } + elsif( !entry_exists($_,\@cleaned_up_libs) ) { + push @cleaned_up_libs, $_; + } +} +# +# Print the new list of libraries and paths +# +print join( " ", @cleaned_up_libs ); + +# +# Subroutines +# +sub entry_exists { + my $entry = shift; # String + my $list = shift; # Reference to an array + foreach( @$list ) { + if( $entry eq $_ ) { return 1; } + } + return 0; +} +# +sub remove_rel_paths { + my $entry_in = shift; + if ($entry_in=~/-L\.\./) { + return $entry_in; + } + my @paths = split("/",$entry_in); + my @new_paths; 
+ foreach( @paths ) { + if( !($_=~/\.\./) ) { + push @new_paths, $_; + } + else { + pop @new_paths + } + } + return join("/",@new_paths); +} diff --git a/Zoltan-3.90/config/tac_arg_check_mpi.m4 b/Zoltan-3.90/config/tac_arg_check_mpi.m4 new file mode 100644 index 00000000..bac9cb9e --- /dev/null +++ b/Zoltan-3.90/config/tac_arg_check_mpi.m4 @@ -0,0 +1,67 @@ +dnl @synopsis TAC_ARG_CHECK_MPI +dnl +dnl Check to make sure any definitions set in TAC_ARG_CONFIG_MPI +dnl are valid, set the MPI flags. Test MPI compile using C++ compiler. +dnl +dnl @author Mike Heroux +dnl +AC_DEFUN([TAC_ARG_CHECK_MPI], +[ + +if test "X${HAVE_PKG_MPI}" = "Xyes"; then + + if test -n "${MPI_DIR}" && test -z "${MPI_INC}"; then + MPI_INC="${MPI_DIR}/include" + fi + + if test -n "${MPI_INC}"; then + CPPFLAGS="${CPPFLAGS} -I${MPI_INC}" + fi + + AC_LANG_CPLUSPLUS + AC_MSG_CHECKING(for mpi.h) + AC_TRY_CPP([#include "mpi.h"], + [AC_MSG_RESULT(yes)], + [ + AC_MSG_RESULT(no) + echo "-----" + echo "Cannot link simple MPI program." + echo "Try --with-mpi-compilers to specify MPI compilers." + echo "Or try --with-mpi-libs, --with-mpi-incdir, --with-mpi-libdir" + echo "to specify all the specific MPI compile options." + echo "-----" + AC_MSG_ERROR(MPI cannot link) + ]) + + if test -n "${MPI_DIR}" && test -z "${MPI_LIBDIR}"; then + MPI_LIBDIR="${MPI_DIR}/lib" + fi + + if test -n "${MPI_LIBDIR}"; then + LDFLAGS="${LDFLAGS} -L${MPI_LIBDIR}" + fi + + if test -z "${MPI_LIBS}" && test -n "${MPI_LIBDIR}"; then + MPI_LIBS="-lmpi" + fi + + if test -n "${MPI_LIBS}"; then + LIBS="${MPI_LIBS} ${LIBS}" + fi + +# AC_LANG_CPLUSPLUS +# AC_MSG_CHECKING(whether MPI will link using C++ compiler) +# AC_TRY_LINK([#include ], +# [int c; char** v; MPI_Init(&c,&v);], +# [AC_MSG_RESULT(yes)], +# [AC_MSG_RESULT(no) +# echo "-----" +# echo "Cannot link simple MPI program." +# echo "Or try --with-mpi-libs, --with-mpi-incdir, --with-mpi-libdir" +# echo "to specify all the specific MPI compile options." +# echo "-----" +# AC_MSG_ERROR(MPI cannot link)] +# ) + +fi +]) diff --git a/Zoltan-3.90/config/tac_arg_config_mpi.m4 b/Zoltan-3.90/config/tac_arg_config_mpi.m4 new file mode 100644 index 00000000..5345a3ef --- /dev/null +++ b/Zoltan-3.90/config/tac_arg_config_mpi.m4 @@ -0,0 +1,232 @@ +dnl @synopsis TAC_ARG_CONFIG_MPI +dnl +dnl Test a variety of MPI options: +dnl --enable-mpi - Turns MPI compiling mode on +dnl --with-mpi - specify root directory of MPI +dnl --with-mpi-compilers - Turns on MPI compiling mode and sets the MPI C++ +dnl compiler = mpicxx, mpic++ or mpiCC, +dnl the MPI C compiler = mpicc and +dnl the MPI Fortran compiler = mpif77 +dnl --with-mpi-incdir - specify include directory for MPI +dnl --with-mpi-libs - specify MPI libraries +dnl --with-mpi-libdir - specify location of MPI libraries +dnl +dnl If any of these options are set, HAVE_MPI will be defined for both +dnl Autoconf and Automake, and HAVE_MPI will be defined in the +dnl generated config.h file +dnl +dnl +dnl @author Mike Heroux +dnl Modified 12/26/2007 by Jim Willenbring to skip the Fortran compiler +dnl check if Fortran is not enabled. +dnl +AC_DEFUN([TAC_ARG_CONFIG_MPI], +[ + +AC_ARG_ENABLE(mpi, +[AC_HELP_STRING([--enable-mpi],[MPI support])], +[HAVE_PKG_MPI=$enableval], +[HAVE_PKG_MPI=yes] +) + +AC_ARG_WITH(mpi-compilers, +[AC_HELP_STRING([--with-mpi-compilers=PATH], +[use MPI compilers mpicc, mpif77, and mpicxx, mpic++ or mpiCC in the specified path or in the default path if no path is specified. 
Enables MPI])], +[ + if test X${withval} != Xno; then + HAVE_PKG_MPI=yes + if test X${withval} = Xyes; then + # Check for mpicxx, if it does not exist, check for mpic++, if it does + # not exist, use mpiCC instead. + AC_CHECK_PROG(MPI_TEMP_CXX, mpicxx, mpicxx, no) + if test X${MPI_TEMP_CXX} = Xno; then + AC_CHECK_PROG(MPI_CXX, mpic++, mpic++, mpiCC) + else + MPI_CXX=${MPI_TEMP_CXX} + fi + MPI_CC=mpicc + MPI_F77=mpif77 + MPI_F90=mpif90 + else + if test -f ${withval}/mpicxx; then + MPI_CXX=${withval}/mpicxx + elif test -f ${withval}/mpic++; then + MPI_CXX=${withval}/mpic++ + else + MPI_CXX=${withval}/mpiCC + fi + MPI_CC=${withval}/mpicc + MPI_F77=${withval}/mpif77 + MPI_F90=${withval}/mpif90 + fi + fi +], +[ + HAVE_PKG_MPI=yes + # Check for mpicxx, if it does not exist, check for mpic++, if it does + # not exist, use mpiCC instead. + AC_CHECK_PROG(MPI_TEMP_CXX, mpicxx, mpicxx, no) + if test X${MPI_TEMP_CXX} = Xno; then + AC_CHECK_PROG(MPI_CXX, mpic++, mpic++, mpiCC) + else + MPI_CXX=${MPI_TEMP_CXX} + fi + MPI_CC=mpicc + MPI_F77=mpif77 + MPI_F90=mpif90 +] +) + +AC_ARG_WITH(mpi, +[AC_HELP_STRING([--with-mpi=MPIROOT],[use MPI root directory (enables MPI)])], +[ + HAVE_PKG_MPI=yes + MPI_DIR=${withval} + AC_MSG_CHECKING(MPI directory) + AC_MSG_RESULT([${MPI_DIR}]) +] +) + +#AC_ARG_WITH(mpi-include, +#[AC_HELP_STRING([--with-mpi-include],[Obsolete. Use --with-mpi-incdir=DIR instead. Do not prefix DIR with '-I'.])], +#[AC_MSG_ERROR([--with-mpi-include is an obsolte option. Use --with-mpi-incdir=DIR instead. Do not prefix DIR with '-I'. For example '--with-mpi-incdir=/usr/lam_path/include'.])] +#) + +AC_ARG_WITH(mpi-libs, +[AC_HELP_STRING([--with-mpi-libs="LIBS"],[MPI libraries @<:@"-lmpi"@:>@])], +[ + MPI_LIBS=${withval} + AC_MSG_CHECKING(user-defined MPI libraries) + AC_MSG_RESULT([${MPI_LIBS}]) +] +) + +AC_ARG_WITH(mpi-incdir, +[AC_HELP_STRING([--with-mpi-incdir=DIR],[MPI include directory @<:@MPIROOT/include@:>@ Do not use -I])], +[ + MPI_INC=${withval} + AC_MSG_CHECKING(user-defined MPI includes) + AC_MSG_RESULT([${MPI_INC}]) +] +) + +AC_ARG_WITH(mpi-libdir, +[AC_HELP_STRING([--with-mpi-libdir=DIR],[MPI library directory @<:@MPIROOT/lib@:>@ Do not use -L])], +[ + MPI_LIBDIR=${withval} + AC_MSG_CHECKING(user-defined MPI library directory) + AC_MSG_RESULT([${MPI_LIBDIR}]) +] +) + +AC_MSG_CHECKING(whether we are using MPI) +AC_MSG_RESULT([${HAVE_PKG_MPI}]) + +if test "X${HAVE_PKG_MPI}" = "Xyes"; then + AC_DEFINE(HAVE_MPI,,[define if we want to use MPI]) +fi + +dnl Define Automake version of HAVE_MPI if appropriate + +AM_CONDITIONAL(HAVE_MPI, [test "X${HAVE_PKG_MPI}" = "Xyes"]) + + +dnl +dnl -------------------------------------------------------------------- +dnl Check for MPI compilers (must be done *before* AC_PROG_CXX, +dnl AC_PROG_CC and AC_PROG_F77) +dnl +dnl -------------------------------------------------------------------- + +if test "X$ac_cv_use_zoltan_cppdriver" = "Xyes"; then + +if test -n "${MPI_CXX}"; then + if test -f ${MPI_CXX}; then + MPI_CXX_EXISTS=yes + else + AC_CHECK_PROG(MPI_CXX_EXISTS, ${MPI_CXX}, yes, no) + fi + + if test "X${MPI_CXX_EXISTS}" = "Xyes"; then + CXX=${MPI_CXX} + else + echo "-----" + echo "Cannot find MPI C++ compiler ${MPI_CXX}." 
+ echo "Specify a path to all mpi compilers with --with-mpi-compilers=PATH" + echo "or specify a C++ compiler using CXX=" + echo "Do not use --with-mpi-compilers if using CXX=" + echo "-----" + AC_MSG_ERROR([MPI C++ compiler (${MPI_CXX}) not found.]) + fi +fi + +fi dnl ac_cv_use_zoltan_cppdriver + +if test -n "${MPI_CC}"; then + if test -f ${MPI_CC}; then + MPI_CC_EXISTS=yes + else + AC_CHECK_PROG(MPI_CC_EXISTS, ${MPI_CC}, yes, no) + fi + + if test "X${MPI_CC_EXISTS}" = "Xyes"; then + CC=${MPI_CC} + else + echo "-----" + echo "Cannot find MPI C compiler ${MPI_CC}." + echo "Specify a path to all mpi compilers with --with-mpi-compilers=PATH" + echo "or specify a C compiler using CC=" + echo "Do not use --with-mpi-compilers if using CC=" + echo "-----" + AC_MSG_ERROR([MPI C compiler (${MPI_CC}) not found.]) + fi +fi + +if test "X$ac_cv_use_fortran" = "Xyes"; then + +if test -n "${MPI_F77}"; then + if test -f ${MPI_F77}; then + MPI_F77_EXISTS=yes + else + AC_CHECK_PROG(MPI_F77_EXISTS, ${MPI_F77}, yes, no) + fi + + if test "X${MPI_F77_EXISTS}" = "Xyes"; then + F77=${MPI_F77} + else + echo "-----" + echo "Cannot find MPI Fortran compiler ${MPI_F77}." + echo "Specify a path to all mpi compilers with --with-mpi-compilers=PATH" + echo "or specify a Fortran 77 compiler using F77=" + echo "Do not use --with-mpi-compilers if using F77=" + echo "-----" + AC_MSG_ERROR([MPI Fortran 77 compiler (${MPI_F77}) not found.]) + fi +fi + +if test "X$ac_cv_use_fortran90" = "Xyes"; then + +if test -n "${MPI_F90}"; then + if test -f ${MPI_F90}; then + MPI_F90_EXISTS=yes + else + AC_CHECK_PROG(MPI_F90_EXISTS, ${MPI_F90}, yes, no) + fi + + if test "X${MPI_F90_EXISTS}" = "Xyes"; then + FC=${MPI_F90} + else + echo "-----" + echo "Cannot find MPI Fortran compiler ${MPI_F90}." + echo "Specify a path to all mpi compilers with --with-mpi-compilers=PATH" + echo "or specify a Fortran compiler using FC=" + echo "Do not use --with-mpi-compilers if using FC=" + echo "-----" + AC_MSG_ERROR([MPI Fortran compiler (${MPI_F90}) not found.]) + fi +fi + +fi dnl ac_cv_use_fortran90 + +fi dnl ac_cv_use_fortran +]) diff --git a/Zoltan-3.90/config/tac_arg_enable_export-makefiles.m4 b/Zoltan-3.90/config/tac_arg_enable_export-makefiles.m4 new file mode 100644 index 00000000..b7a8b38d --- /dev/null +++ b/Zoltan-3.90/config/tac_arg_enable_export-makefiles.m4 @@ -0,0 +1,76 @@ +dnl Enables export makefile specific code +dnl +dnl The following AM_CONDITIONALS are set for makefiles to access: +dnl USING_EXPORT_MAKEFILES +dnl USING_PERL via TAC_ARG_WITH_PERL +dnl USING_GNUMAKE +dnl +dnl The following AC_DEFINES are set: +dnl HAVE_EXPORT_MAKEFILES +dnl +dnl the following variables are set: +dnl PERL_EXE for the perl executable via TAC_ARG_WITH_PERL +dnl +dnl This file was based on tac_arg_enable_feature.m4 by Mike Heroux +dnl @author Roger Pawlowski +dnl +AC_DEFUN([TAC_ARG_ENABLE_EXPORT_MAKEFILES], +[ +AC_ARG_ENABLE(export-makefiles, +AC_HELP_STRING([--enable-export-makefiles],[Creates export makefiles in the install (prefix) directory. This option requires perl to be set in your path or defined with --with-perl=. Note that the export makefiles are always created and used in the build directory, but will not be installable without this option to change the paths. 
(default is $1)]), +ac_cv_use_export_makefiles=$enableval, +ac_cv_use_export_makefiles=$1) + +AC_MSG_CHECKING(whether to build export makefiles) + +if test "X$ac_cv_use_export_makefiles" != "Xno"; then + + AC_MSG_RESULT(yes) + AC_DEFINE([HAVE_EXPORT_MAKEFILES],,[Define if you want to build export makefiles.]) + +else + + AC_MSG_RESULT(no) + +fi + +AM_CONDITIONAL(USING_EXPORT_MAKEFILES, test X${ac_cv_use_export_makefiles} = Xyes) + +# Check for perl to run scripts (Required dependency) +TAC_ARG_WITH_PERL + +if test "X$HAVE_PERL" != "Xyes" && + test "X$ac_cv_use_export_makefiles" != "Xno"; then + AC_MSG_RESULT(no) + AC_MSG_ERROR([Failed to find the perl executable. The flag --enable-export-makefiles requires perl to be either in your path or explicitly defined by the flag --with-perl=. If you do not require the export makefiles to be installed via 'make install', you can disable the export makefiles with --disable-export-makefiles.]) +fi + +# Check for using gnumake to clean up link lines via +# gnumake's "shell" command. Optional dependency. +AC_DEFUN([TAC_ARG_WITH_GNUMAKE], +[ +AC_ARG_WITH(gnumake, +AC_HELP_STRING([--with-gnumake],[Gnu's make has special functions we can use to eliminate redundant paths in the build and link lines. Enable this if you use gnu-make to build Trilinos. This requires that perl is in your path or that you have specified the perl executable with --with-perl=. Configure will check for the existence of the perl executable and quit with an error if it is not found. (default is no)]), +ac_cv_use_gnumake=$withval, ac_cv_use_gnumake=no) + +AC_MSG_CHECKING(whether gnumake specific code should be enabled) + +if test "X$ac_cv_use_gnumake" != "Xno"; then + AC_MSG_RESULT(yes) + AC_DEFINE([HAVE_GNUMAKE],,[Define if you are using gnumake - this will shorten your link lines.]) +else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL(USING_GNUMAKE, test "X$ac_cv_use_gnumake" = "Xyes") +]) + +TAC_ARG_WITH_GNUMAKE + +if test "X$HAVE_PERL" != "Xyes" && + test "X$ac_cv_use_gnumake" != "Xno"; then + AC_MSG_RESULT(no) + AC_MSG_ERROR([The flag --with-gnumake requires perl to be in your path. The perl executable can alternatively be explicitly defined by the flag --with-perl=.]) +fi + +]) + diff --git a/Zoltan-3.90/config/tac_arg_enable_feature.m4 b/Zoltan-3.90/config/tac_arg_enable_feature.m4 new file mode 100644 index 00000000..4e22753a --- /dev/null +++ b/Zoltan-3.90/config/tac_arg_enable_feature.m4 @@ -0,0 +1,40 @@ +dnl @synopsis TAC_ARG_ENABLE_FEATURE(FEATURE_NAME, FEATURE_DESCRIPTION, HAVE_NAME, DEFAULT_VAL) +dnl +dnl Test for --enable-${FEATURE_NAME} and set to DEFAULT_VAL value if feature not specified. +dnl Also calls AC_DEFINE to define HAVE_${HAVE_NAME} if value is not equal to "no" +dnl +dnl Use this macro to help defining whether or not optional +dnl features* should compiled. For example: +dnl +dnl TAC_ARG_ENABLE_FEATURE(epetra, [Configure and build epetra], EPETRA, yes) +dnl +dnl will test for --enable-epetra when configure is run. If it is defined +dnl and not set to "no" or not defined (default is "yes") then HAVE_EPETRA will +dnl be defined, if --enable-epetra is defined to be "no", HAVE_EPETRA will not +dnl be defined. +dnl +dnl *NOTE: epetra, aztecoo, komplex, ifpack, and other software found in +dnl subdirectories of Trilinos/packages are "packages" in their own right. +dnl However, these packages are also "features" of the larger package +dnl "Trilinos". 
Therefore, when configuring from the Trilinos directory, +dnl it is appropriate to refer to these software packages as "features". +dnl +dnl This file was based on tac_arg_with_package.m4 by Mike Heroux +dnl @author James Willenbring +dnl +AC_DEFUN([TAC_ARG_ENABLE_FEATURE], +[ +AC_ARG_ENABLE([$1], +AC_HELP_STRING([--enable-$1],[$2 (default is [$4])]), +ac_cv_use_$1=$enableval, ac_cv_use_$1=$4) + +AC_MSG_CHECKING(whether to use [$1]) + +if test "X$ac_cv_use_$1" != "Xno"; then + AC_MSG_RESULT(yes) + AC_DEFINE([HAVE_$3],,[Define if want to build $1]) +else + AC_MSG_RESULT(no) +fi +]) + diff --git a/Zoltan-3.90/config/tac_arg_enable_feature_sub.m4 b/Zoltan-3.90/config/tac_arg_enable_feature_sub.m4 new file mode 100644 index 00000000..164ca74e --- /dev/null +++ b/Zoltan-3.90/config/tac_arg_enable_feature_sub.m4 @@ -0,0 +1,52 @@ +dnl @synopsis TAC_ARG_ENABLE_FEATURE_SUB(FEATURE_NAME, SUB_FEATURE_NAME, FEATURE_DESCRIPTION, HAVE_NAME, DEFAULT_VAL) +dnl +dnl This hack gets around the fact that TAC_ARG_ENABLE_FEATURE does not support underscores +dnl in its feature names. TAC_ARG_ENABLE_FEATURE_SUB allows exactly one underscore. Not great, +dnl but arguably better than supporting no underscores. +dnl +dnl TAC_ARG_ENABLE_FEATURE(feature-sub, [Configure and build feature-sub], FEATURE_SUB, yes) +dnl fails because tac_arg_enable_feature tests for ac_cv_use_feature-sub which gets +dnl rejected because the `-' is not allowed in variables. (AC_ARG_ENABLE sets ac_cv_use_feature_sub +dnl to avoid this problem.) Use: +dnl +dnl TAC_ARG_ENABLE_FEATURE(feature, sub, [Configure and build feature-sub], FEATURE_SUB, yes) +dnl instead. +dnl +dnl Test for --enable-${FEATURE_NAME} and set to DEFAULT_VAL value if feature not specified. +dnl Also calls AC_DEFINE to define HAVE_${HAVE_NAME} if value is not equal to "no" +dnl +dnl Use this macro to help defining whether or not optional +dnl features* should compiled. For example: +dnl +dnl TAC_ARG_ENABLE_FEATURE(epetra, [Configure and build epetra], EPETRA, yes) +dnl +dnl will test for --enable-epetra when configure is run. If it is defined +dnl and not set to "no" or not defined (default is "yes") then HAVE_EPETRA will +dnl be defined, if --enable-epetra is defined to be "no", HAVE_EPETRA will not +dnl be defined. +dnl +dnl *NOTE: epetra, aztecoo, komplex, ifpack, and other software found in +dnl subdirectories of Trilinos/packages are "packages" in their own right. +dnl However, these packages are also "features" of the larger package +dnl "Trilinos". Therefore, when configuring from the Trilinos directory, +dnl it is appropriate to refer to these software packages as "features". 
+dnl +dnl This file was based on tac_arg_enable_package.m4 by Jim Willenbring +dnl @author Ken Stanley +dnl +AC_DEFUN([TAC_ARG_ENABLE_FEATURE_SUB], +[ +AC_ARG_ENABLE([$1-$2], +AC_HELP_STRING([--enable-$1-$2],[$3 (default is [$5])]), +ac_cv_use_$1_$2=$enableval, ac_cv_use_$1_$2=$5) + +AC_MSG_CHECKING(whether to use [$1-$2]) + +if test "X$ac_cv_use_$1_$2" != "Xno"; then + AC_MSG_RESULT(yes) + AC_DEFINE([HAVE_$4],,[Define if want to build $1-$2]) +else + AC_MSG_RESULT(no) +fi +]) + diff --git a/Zoltan-3.90/config/tac_arg_enable_feature_sub_check.m4 b/Zoltan-3.90/config/tac_arg_enable_feature_sub_check.m4 new file mode 100755 index 00000000..b3876fd7 --- /dev/null +++ b/Zoltan-3.90/config/tac_arg_enable_feature_sub_check.m4 @@ -0,0 +1,54 @@ +dnl @synopsis TAC_ARG_ENABLE_FEATURE_SUB_CHECK(FEATURE_NAME, SUB_FEATURE_NAME, FEATURE_DESCRIPTION, HAVE_NAME) +dnl +dnl This hack gets around the fact that TAC_ARG_ENABLE_FEATURE does not support underscores +dnl in its feature names. TAC_ARG_ENABLE_FEATURE_SUB_CHECK allows exactly one underscore. Not great, +dnl but arguably better than supporting no underscores. +dnl +dnl TAC_ARG_ENABLE_FEATURE(feature-sub, [Configure and build feature-sub], FEATURE_SUB, yes) +dnl fails because tac_arg_enable_feature tests for ac_cv_use_feature-sub which gets +dnl rejected because the `-' is not allowed in variables. (AC_ARG_ENABLE sets ac_cv_use_feature_sub +dnl to avoid this problem.) Use: +dnl +dnl TAC_ARG_ENABLE_FEATURE_SUB_CHECK(feature, sub, [Configure and build feature-sub], FEATURE_SUB) +dnl instead. +dnl +dnl This macro will test for --enable-${FEATURE_NAME}-${SUB_FEATURE_NAME} when configure is run. +dnl If it is defined and not set to "no" or not defined and --disable-${SUB_FEATURE_NAME} is not +dnl specified then HAVE_${HAVE_NAME} will be defined. +dnl +dnl *NOTE: This macro is designed for the use-case when there is an individual Trilinos package +dnl offering fine-grained control of a Trilinos option. This way, the individual package +dnl option is enabled, as long as the Trilinos option is not disabled. If the Trilinos option is +dnl disabled, then the user must enable each packages option individually. For instance: +dnl +dnl --disable-tests --enable-teuchos-tests +dnl +dnl *NOTE: epetra, aztecoo, komplex, ifpack, and other software found in +dnl subdirectories of Trilinos/packages are "packages" in their own right. +dnl However, these packages are also "features" of the larger package +dnl "Trilinos". Therefore, when configuring from the Trilinos directory, +dnl it is appropriate to refer to these software packages as "features". +dnl +dnl This file was based on tac_arg_enable_package.m4 by Jim Willenbring +dnl and tac_arg_enable_package_sub.m4 by Ken Stanley. 
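+dnl
+dnl A hypothetical Zoltan-style use (the names here are purely illustrative):
+dnl
+dnl   TAC_ARG_ENABLE_FEATURE_SUB_CHECK(zoltan, tests, [Build the test drivers], ZOLTAN_TESTS)
+dnl
+dnl then './configure --disable-tests' leaves HAVE_ZOLTAN_TESTS undefined unless
+dnl the user also passes '--enable-zoltan-tests'.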
+dnl +dnl @author Heidi Thornquist +dnl +AC_DEFUN([TAC_ARG_ENABLE_FEATURE_SUB_CHECK], +[ +AC_ARG_ENABLE([$2],, ac_cv_use_$2=$enableval, ac_cv_use_$2=yes) + +AC_ARG_ENABLE([$1-$2], +AC_HELP_STRING([--enable-$1-$2],[$3 (default is yes if --disable-$2 is not specified)]), +ac_cv_use_$1_$2=$enableval, ac_cv_use_$1_$2=${ac_cv_use_$2}) + +AC_MSG_CHECKING(whether to use [$1-$2]) + +if test "X$ac_cv_use_$1_$2" != "Xno"; then + AC_MSG_RESULT(yes) + AC_DEFINE([HAVE_$4],,[Define if want to build $1-$2]) +else + AC_MSG_RESULT(no) +fi +]) + diff --git a/Zoltan-3.90/config/tac_arg_enable_option.m4 b/Zoltan-3.90/config/tac_arg_enable_option.m4 new file mode 100644 index 00000000..8b39f066 --- /dev/null +++ b/Zoltan-3.90/config/tac_arg_enable_option.m4 @@ -0,0 +1,30 @@ +dnl @synopsis TAC_ARG_ENABLE_OPTION(FEATURE_NAME, FEATURE_DESCRIPTION, HAVE_NAME, DEFAULT_VAL) +dnl +dnl Test for --enable-${FEATURE_NAME} and set to DEFAULT_VAL value if feature not specified. +dnl Also calls AC_DEFINE to define HAVE_${HAVE_NAME} if value is not equal to "no" +dnl +dnl Use this macro to facilitate definition of options in a package. For example: +dnl +dnl TAC_ARG_ENABLE_OPTION(threads, [enable shared memory threads], THREADS, no) +dnl +dnl will test for --enable-threads when configure is run. If it is defined (and not set to "no") +dnl then HAVE_THREADS will be defined, Otherwise HAVE_THREADS will not be defined. +dnl +dnl @author Mike Heroux +dnl +AC_DEFUN([TAC_ARG_ENABLE_OPTION], +[ +AC_ARG_ENABLE([$1], +AC_HELP_STRING([--enable-$1],[$2 (default is [$4])]), +ac_cv_use_$1=$enableval, ac_cv_use_$1=$4) + +AC_MSG_CHECKING(whether to use [$1]) + +if test "X$ac_cv_use_$1" != "Xno"; then + AC_MSG_RESULT(yes) + AC_DEFINE([HAVE_$3],1,[Define if want to build with $1 enabled]) +else + AC_MSG_RESULT(no) +fi +]) + diff --git a/Zoltan-3.90/config/tac_arg_with_3pl_sub.m4 b/Zoltan-3.90/config/tac_arg_with_3pl_sub.m4 new file mode 100644 index 00000000..ce6c031d --- /dev/null +++ b/Zoltan-3.90/config/tac_arg_with_3pl_sub.m4 @@ -0,0 +1,17 @@ +dnl @synopsis TAC_ARG_WITH_3PL_SUB( VALUE_NAME, VALUE_SUB_NAME, VALUE_DESCRIPTION) +dnl +dnl Test for --with-${VALUE_NAME}-${VALUE_SUB_NAME} and set to no if value not specified. +dnl +dnl Use this macro to set variables, such as library names and include paths, which +dnl include an underscore. +dnl +dnl This file was based on tac_arg_with_sub.m4 by Ken Stanley +dnl @author Jim Willenbring +dnl +AC_DEFUN([TAC_ARG_WITH_3PL_SUB], +[ +AC_ARG_WITH([$1-$2], +AC_HELP_STRING([--with-$1-$2],[$3]), +tac_with_$1_$2=$withval, tac_with_$1_$2=no) +]) + diff --git a/Zoltan-3.90/config/tac_arg_with_ar.m4 b/Zoltan-3.90/config/tac_arg_with_ar.m4 new file mode 100644 index 00000000..9568f3e5 --- /dev/null +++ b/Zoltan-3.90/config/tac_arg_with_ar.m4 @@ -0,0 +1,39 @@ +dnl @synopsis TAC_ARG_WITH_AR +dnl +dnl Test for --with-ar="ar_program ar_flags". +dnl Default is "ar cru" +dnl +dnl Generates an Automake conditional USE_ALTERNATE_AR that can be tested. +dnl Generates the user-specified archiver command in @ALTERNATE_AR@. 
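+dnl
+dnl Example (the archiver flags shown are only illustrative):
+dnl
+dnl   ./configure --with-ar="ar -X64 cru"
+dnl
+dnl sets the USE_ALTERNATE_AR conditional and substitutes
+dnl @ALTERNATE_AR@ = "ar -X64 cru" into the Makefiles.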
+dnl +dnl @author Mike Heroux +dnl +AC_DEFUN([TAC_ARG_WITH_AR], +[ +AC_ARG_WITH(ar, +AC_HELP_STRING([--with-ar], [override archiver command (default is "ar cru")]), +[ +AC_MSG_CHECKING(user-defined archiver) +AC_MSG_RESULT([${withval}]) +USE_ALTERNATE_AR=yes +ALTERNATE_AR="${withval}" +] +) + +if test -n "${SPECIAL_AR}" && test "X${USE_ALTERNATE_AR}" != "Xyes"; +then + USE_ALTERNATE_AR=yes + ALTERNATE_AR="${SPECIAL_AR}" +fi + +AC_MSG_CHECKING(for special archiver command) +if test "X${USE_ALTERNATE_AR}" = "Xyes"; then + AC_MSG_RESULT([${ALTERNATE_AR}]) + AM_CONDITIONAL(USE_ALTERNATE_AR, true) +else + AC_MSG_RESULT([none]) + AM_CONDITIONAL(USE_ALTERNATE_AR, false) +fi +AC_SUBST(ALTERNATE_AR) +]) + diff --git a/Zoltan-3.90/config/tac_arg_with_flags.m4 b/Zoltan-3.90/config/tac_arg_with_flags.m4 new file mode 100644 index 00000000..256450ac --- /dev/null +++ b/Zoltan-3.90/config/tac_arg_with_flags.m4 @@ -0,0 +1,31 @@ +dnl @synopsis TAC_ARG_WITH_FLAGS(lcase_name, UCASE_NAME) +dnl +dnl Test for --with-lcase_name="compiler/loader flags". if defined, prepend +dnl flags to standard UCASE_NAME definition. +dnl +dnl Use this macro to facilitate additional special flags that should be +dnl passed on to the preprocessor/compilers/loader. +dnl +dnl Example use +dnl +dnl TAC_ARG_WITH_FLAGS(cxxflags, CXXFLAGS) +dnl +dnl tests for --with-cxxflags and pre-pends to CXXFLAGS +dnl +dnl +dnl @author Mike Heroux +dnl +AC_DEFUN([TAC_ARG_WITH_FLAGS], +[ +AC_MSG_CHECKING([whether additional [$2] flags should be added]) +AC_ARG_WITH($1, +AC_HELP_STRING([--with-$1], +[additional [$2] flags to be added: will prepend to [$2]]), +[ +$2="${withval} ${$2}" +AC_MSG_RESULT([$2 = ${$2}]) +], +AC_MSG_RESULT(no) +) +]) + diff --git a/Zoltan-3.90/config/tac_arg_with_incdirs.m4 b/Zoltan-3.90/config/tac_arg_with_incdirs.m4 new file mode 100644 index 00000000..ee81a642 --- /dev/null +++ b/Zoltan-3.90/config/tac_arg_with_incdirs.m4 @@ -0,0 +1,27 @@ +dnl @synopsis TAC_ARG_WITH_INCDIRS +dnl +dnl Test for --with-incdirs="-Iincdir1 -Iincdir2". if defined, prepend +dnl "-Iincdir1 -Iincdir2" to CPPFLAGS +dnl +dnl Use this macro to facilitate addition of directories to include file search path. +dnl +dnl +dnl @author Mike Heroux +dnl +AC_DEFUN([TAC_ARG_WITH_INCDIRS], +[ +AC_MSG_CHECKING([whether additional include search paths defined]) +AC_ARG_WITH(incdirs, +AC_HELP_STRING([--with-incdirs], +[additional directories containing include files: will prepend to search here for includes, use -Idir format]), +[ +CPPFLAGS="${withval} ${CPPFLAGS}" +CFLAGS="${withval} ${CFLAGS}" +FCFLAGS="${withval} ${FCFLAGS}" +FFLAGS="${withval} ${FFLAGS}" +AC_MSG_RESULT([${withval}]) +], +AC_MSG_RESULT(no) +) +]) + diff --git a/Zoltan-3.90/config/tac_arg_with_libdirs.m4 b/Zoltan-3.90/config/tac_arg_with_libdirs.m4 new file mode 100644 index 00000000..b2f94381 --- /dev/null +++ b/Zoltan-3.90/config/tac_arg_with_libdirs.m4 @@ -0,0 +1,24 @@ +dnl @synopsis TAC_ARG_WITH_LIBDIRS +dnl +dnl Test for --with-libdirs="-Llibdir1 -Llibdir2". if defined, +dnl prepend "-Llibdir1 -Llibdir2" to LDFLAGS +dnl +dnl Use this macro to facilitate addition of directories to library search path. +dnl +dnl +dnl @author Mike Heroux +dnl +AC_DEFUN([TAC_ARG_WITH_LIBDIRS], +[ +AC_MSG_CHECKING([whether additional library search paths defined]) +AC_ARG_WITH(libdirs, +AC_HELP_STRING([--with-libdirs], +[OBSOLETE use --with-ldflags instead. (ex. 
--with-ldflags="-L -L")]), +[ +LDFLAGS="${withval} ${LDFLAGS}" +AC_MSG_RESULT([${withval}]) +], +AC_MSG_RESULT(no) +) +]) + diff --git a/Zoltan-3.90/config/tac_arg_with_libs.m4 b/Zoltan-3.90/config/tac_arg_with_libs.m4 new file mode 100644 index 00000000..3a648807 --- /dev/null +++ b/Zoltan-3.90/config/tac_arg_with_libs.m4 @@ -0,0 +1,30 @@ +dnl @synopsis TAC_ARG_WITH_LIBS +dnl +dnl Test for --with-libs="name(s)". +dnl +dnl Prepends the specified name(s) to the list of libraries to link +dnl with. +dnl +dnl Example use +dnl +dnl TAC_ARG_WITH_LIBS +dnl +dnl tests for --with-libs and pre-pends to LIBS +dnl +dnl @author Jim Willenbring +dnl +AC_DEFUN([TAC_ARG_WITH_LIBS], +[ +AC_MSG_CHECKING([whether additional libraries are needed]) +AC_ARG_WITH(libs, +AC_HELP_STRING([--with-libs], +[List additional libraries here. For example, --with-libs=-lsuperlu +or --with-libs=/path/libsuperlu.a]), +[ +LIBS="${withval} ${LIBS}" +AC_MSG_RESULT([LIBS = ${LIBS}]) +], +AC_MSG_RESULT(no) +) +] +) diff --git a/Zoltan-3.90/config/tac_arg_with_package.m4 b/Zoltan-3.90/config/tac_arg_with_package.m4 new file mode 100644 index 00000000..8706f5dc --- /dev/null +++ b/Zoltan-3.90/config/tac_arg_with_package.m4 @@ -0,0 +1,32 @@ +dnl @synopsis TAC_ARG_WITH_PACKAGE(FEATURE_NAME, FEATURE_DESCRIPTION, HAVE_NAME, DEFAULT_VAL) +dnl +dnl Test for --with-${FEATURE_NAME} and set to DEFAULT_VAL value if feature not specified. +dnl Also calls AC_DEFINE to define HAVE_${HAVE_NAME} if value is not equal to "no" +dnl +dnl Use this macro to help defining whether or not interfaces for optional +dnl package should compiled. For example: +dnl +dnl TAC_ARG_WITH_PACKAGE(zoltan, [Enable Zoltan interface support], ZOLTAN, no) +dnl +dnl will test for --with-zoltan when configure is run. If it is defined +dnl (and not set to "no") then HAVE_ZOLTAN will be defined, +dnl Otherwise HAVE_ZOLTAN will not be defined. +dnl +dnl @author Mike Heroux +dnl +AC_DEFUN([TAC_ARG_WITH_PACKAGE], +[ +AC_ARG_WITH([$1], +AC_HELP_STRING([--with-$1],[$2 (default is [$4])]), +ac_cv_use_$1=$withval, ac_cv_use_$1=$4) + +AC_MSG_CHECKING(whether to use [$1]) + +if test "X$ac_cv_use_$1" != "Xno"; then + AC_MSG_RESULT(yes) + AC_DEFINE([HAVE_$3],,[Define if want to build with $1 enabled]) +else + AC_MSG_RESULT(no) +fi +]) + diff --git a/Zoltan-3.90/config/tac_arg_with_perl.m4 b/Zoltan-3.90/config/tac_arg_with_perl.m4 new file mode 100644 index 00000000..63e74ba9 --- /dev/null +++ b/Zoltan-3.90/config/tac_arg_with_perl.m4 @@ -0,0 +1,34 @@ +dnl @synopsis TAC_ARG_WITH_PERL(DEFAULT_VAL) +dnl +dnl Test for --enable-gnumake and set to DEFAULT_VAL value if feature not specified. +dnl Calls AC_DEFINE to define HAVE_GNUMAKE if value is not equal to "no" +dnl Calls AM_CONDITIONAL to define USING_GNUMAKE to true/false. +dnl +dnl This file was based on tac_arg_with_ar.m4 by Mike Heroux +dnl @author Roger Pawlowski +dnl +AC_DEFUN([TAC_ARG_WITH_PERL], +[ + +AC_ARG_WITH(perl, +AC_HELP_STRING([--with-perl], [supply a perl executable. 
For example --with-perl=/usr/bin/perl.]), +[ +AC_MSG_CHECKING(for user supplied perl executable) +AC_MSG_RESULT([${withval}]) +USER_SPECIFIED_PERL=yes +PERL_EXE="${withval}" +], +[ +USER_SPECIFIED_PERL=no +]) + +if test "X${USER_SPECIFIED_PERL}" = "Xyes"; then + AC_CHECK_FILE(${PERL_EXE}, [HAVE_PERL=yes], [HAVE_PERL=no]) + AC_SUBST(PERL_EXE, ${PERL_EXE}) +else + AC_CHECK_PROG(HAVE_PERL, perl, yes, no) + AC_SUBST(PERL_EXE, perl) +fi +AM_CONDITIONAL(USING_PERL, test X${HAVE_PERL} = Xyes) +]) + diff --git a/Zoltan-3.90/config/token-replace.pl b/Zoltan-3.90/config/token-replace.pl new file mode 100755 index 00000000..c3b413ec --- /dev/null +++ b/Zoltan-3.90/config/token-replace.pl @@ -0,0 +1,43 @@ +#!/usr/bin/perl -w +# +# This perl script replaces a string with another string +# on a token basis. Here it is allowed for file_in and +# file_out to be the same file. +# +use strict; +# +my $g_use_msg = + "Use: token-replace.pl find_token replacement_token file_in file_out\n"; +if( scalar(@ARGV) < 4 ) { + print STDERR $g_use_msg; + exit(-1); +} +# +my $find_token = shift; +my $replacement_token = shift; +my $file_in_name = shift; +my $file_out_name = shift; +# +#print "file_in_name = $file_in_name\n"; +if($file_in_name=~/CVS/) { +# print "Do not replace in CVS\n"; + exit; +} +open FILE_IN, "<$file_in_name" || die "The file $file_in_name could not be opended for input\n"; +my @file_in_array = ; +close FILE_IN; +# +my $match_str = '([^\w\d_]|^)' . $find_token . '([^\w\d_]|$)'; +#print $match_str . "\n"; +# +my @file_out_array; +my $did_replacement = 0; +foreach(@file_in_array) { + $did_replacement = 1 if $_=~s/$match_str/$1$replacement_token$2/g; + push @file_out_array, $_; +} +if($did_replacement || $file_out_name ne $file_in_name) { + open FILE_OUT, ">$file_out_name" || die "The file $file_out_name could not be opended for output\n"; + print FILE_OUT @file_out_array; + close FILE_OUT; +} diff --git a/Zoltan-3.90/config/wk_fc_get_vendor.m4 b/Zoltan-3.90/config/wk_fc_get_vendor.m4 new file mode 100644 index 00000000..1802e0c5 --- /dev/null +++ b/Zoltan-3.90/config/wk_fc_get_vendor.m4 @@ -0,0 +1,104 @@ +dnl Determine F90 vendor and version string. 
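+dnl
+dnl The macro runs "$FC -version", "$FC -V" and "$FC --version", greps the output
+dnl for known vendor strings, and substitutes FC_VENDOR, FC_VERSION,
+dnl FC_MAJOR_VERSION and FC_VERSION_STRING. For instance (output shown is
+dnl illustrative), a gfortran that reports "GNU Fortran (GCC) 4.8.5" yields
+dnl FC_VENDOR=GNU, FC_VERSION=4.8.5, FC_MAJOR_VERSION=4.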
+AC_DEFUN([WK_FC_GET_VENDOR], +[AC_CACHE_CHECK([the compiler ID], +[wk_cv_prog_f90_version_string], +[$FC -version >conftest.log 2>&1 +$FC -V >>conftest.log 2>&1 +$FC --version >>conftest.log 2>&1 + +wk_grep_f90_NAG=`grep NAG conftest.log | head -1` +wk_grep_f90_Compaq=`grep Compaq conftest.log | head -1` +wk_grep_f90_Digital=`grep DIGITAL conftest.log | head -1` +wk_grep_f90_SGI=`grep MIPS conftest.log | head -1` +wk_grep_f90_Intel=`grep 'Intel(R)' conftest.log | head -1` +wk_grep_f90_Sun=`grep 'Sun' conftest.log | head -1` +wk_grep_f90_Lahey=`grep 'Lahey' conftest.log | head -1` +wk_grep_f90_PGI=`grep 'pgf' conftest.log | head -1` +wk_grep_f90_G95=`grep -i 'g95' conftest.log | grep -i 'gcc' | head -1` +wk_grep_f90_GFORTRAN=`grep -i 'GNU Fortran' conftest.log | head -1` +wk_grep_f90_Absoft=`grep -i 'Absoft' conftest.log | head -1` + +if test -n "$wk_grep_f90_NAG"; then + wk_cv_prog_f90_type="NAG" + wk_cv_prog_f90_version_string=$wk_grep_f90_NAG + wk_cv_prog_f90_version=[`echo $wk_cv_prog_f90_version_string | sed -e 's/.* Release \([0-9][0-9]*\.[0-9][0-9]*.*$\)/\1/'`] + wk_cv_prog_f90_major_version=[`echo $wk_cv_prog_f90_version | sed -e 's/\([0-9][0-9]*\)\..*/\1/'`] +elif test -n "$wk_grep_f90_Compaq"; then + wk_cv_prog_f90_type="Compaq" + wk_cv_prog_f90_version_string=$wk_grep_f90_Compaq +elif test -n "$wk_grep_f90_Digital"; then + wk_cv_prog_f90_type="DEC" + wk_cv_prog_f90_version_string=$wk_grep_f90_Digital +elif test -n "$wk_grep_f90_SGI"; then + wk_cv_prog_f90_type="SGI" + wk_cv_prog_f90_version_string=$wk_grep_f90_SGI +elif test -n "$wk_grep_f90_Intel"; then + wk_cv_prog_f90_type="Intel" + wk_cv_prog_f90_version_string=$wk_grep_f90_Intel + wk_cv_prog_f90_version=[`echo $wk_cv_prog_f90_version_string | sed -e 's/.* Version \([0-9][0-9]*\.[0-9][0-9]*\) .*/\1/'`] + wk_cv_prog_f90_major_version=[`echo $wk_cv_prog_f90_version | sed -e 's/\([0-9][0-9]*\)\..*/\1/'`] +elif test -n "$wk_grep_f90_Sun"; then + wk_cv_prog_f90_type="Sun" + wk_cv_prog_f90_version_string=$wk_grep_f90_Sun + wk_cv_prog_f90_version=[`echo $wk_cv_prog_f90_version_string | sed -e 's/.* Fortran 95 \([0-9][0-9]*\.[0-9][0-9]*\) .*/\1/'`] + wk_cv_prog_f90_major_version=[`echo $wk_cv_prog_f90_version | sed -e 's/\([0-9][0-9]*\)\..*/\1/'`] +elif test -n "$wk_grep_f90_Lahey"; then + wk_cv_prog_f90_type="Lahey" + wk_cv_prog_f90_version_string=$wk_grep_f90_Lahey +elif test -n "$wk_grep_f90_PGI"; then + wk_cv_prog_f90_type="PGI" + wk_cv_prog_f90_version_string=$wk_grep_f90_PGI +elif test -n "$wk_grep_f90_G95"; then + wk_cv_prog_f90_type="G95" + wk_cv_prog_f90_version_string=$wk_grep_f90_G95 +elif test -n "$wk_grep_f90_GFORTRAN"; then + wk_cv_prog_f90_type="GNU" + wk_cv_prog_f90_version_string=$wk_grep_f90_GFORTRAN + wk_cv_prog_f90_version=[`echo $wk_cv_prog_f90_version_string | sed -e 's/.*\([0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\).*/\1/'`] + wk_cv_prog_f90_major_version=[`echo $wk_cv_prog_f90_version | sed -e 's/\([0-9][0-9]*\)\..*/\1/'`] +elif test -n "$wk_grep_f90_Absoft"; then + wk_cv_prog_f90_type="Absoft" + wk_cv_prog_f90_version_string=$wk_grep_f90_Absoft +else + wk_cv_prog_f90_type="unknown" + wk_cv_prog_f90_version_string="unknown" +fi + +rm -f conftest.log + +]) dnl end AC_CACHE_CHECK + +dnl Vendor-specific variables: +AC_CACHE_CHECK([the compiler vendor], [wk_cv_prog_f90_type]) + +if test -n "$wk_cv_prog_f90_version"; then + AC_CACHE_CHECK([the compiler version], [wk_cv_prog_f90_version]) +else + wk_cv_prog_f90_version=$wk_cv_prog_f90_version_string +fi + +if test -n "$wk_cv_prog_f90_major_version"; then + 
AC_CACHE_CHECK([the compiler major version], [wk_cv_prog_f90_major_version]) +else + wk_cv_prog_f90_major_version=$wk_cv_prog_f90_version +fi + +FC_VERSION_STRING=$wk_cv_prog_f90_version_string +FC_VENDOR=$wk_cv_prog_f90_type +FC_VERSION=$wk_cv_prog_f90_version +FC_MAJOR_VERSION=$wk_cv_prog_f90_major_version +AC_SUBST(FC_VERSION_STRING) +AC_SUBST(FC_VENDOR) +AC_SUBST(FC_VERSION) +AC_SUBST(FC_MAJOR_VERSION) + +dnl Module names: (all compilers apparently have converged to '.mod') +dnl The perl scripts need a quoted version of this +FC_MODNAME='$(1:.o=.mod)' +FC_MODNAME_Q='\$(1:.o=.mod)' +AC_SUBST(FC_MODNAME) +AC_SUBST(FC_MODNAME_Q) + +]) dnl end AC_DEFUN + + diff --git a/Zoltan-3.90/config/zac_arg_config_mpi.m4 b/Zoltan-3.90/config/zac_arg_config_mpi.m4 new file mode 100644 index 00000000..796d38bd --- /dev/null +++ b/Zoltan-3.90/config/zac_arg_config_mpi.m4 @@ -0,0 +1,279 @@ +dnl @synopsis ZAC_ARG_CONFIG_MPI +dnl +dnl Test a variety of MPI options: +dnl --enable-mpi - Turns MPI compiling mode on +dnl --with-mpi - specify root directory of MPI +dnl --with-mpi-compilers - Turns on MPI compiling mode and sets the MPI C++ +dnl compiler C, and Fortran +dnl --with-mpi-incdir - specify include directory for MPI +dnl --with-mpi-libs - specify MPI libraries +dnl --with-mpi-libdir - specify location of MPI libraries +dnl +dnl If any of these options are set, HAVE_MPI will be defined for both +dnl Autoconf and Automake, and HAVE_MPI will be defined in the +dnl generated config.h file +dnl +dnl if --disable-mpi, then Zoltan will build serial MPI. +dnl +dnl --enable-mpi and --with-mpi-compilers are the default. +dnl +dnl --without-mpi is actually a user error, but we'll interpret it as --disable-mpi +dnl +dnl If CC, CXX, F77 and/or F90/FTN/FC have been set by the user, and MPI compilers +dnl are desired, these will be assumed to be the MPI compilers. +dnl +dnl This was adapted from the Trilinos TAC_ARG_CONFIG_MPI. 
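+dnl
+dnl Typical invocations (the install prefix /opt/openmpi is a placeholder):
+dnl
+dnl   ./configure                                  # MPI on by default; mpicc etc. sought in $PATH
+dnl   ./configure --with-mpi=/opt/openmpi          # look for MPI compilers in /opt/openmpi/bin
+dnl   ./configure --with-mpi-compilers=/opt/openmpi/bin
+dnl   ./configure --disable-mpi                    # Zoltan will build serial MPI
+dnl   ./configure CC=mpicc FC=mpif90               # user-set compilers are taken as the MPI compilers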
+dnl +AC_DEFUN([ZAC_ARG_CONFIG_MPI], +[ + +HAVE_PKG_MPI=unset +SEEK_MPI_COMPILERS=unset +MPI_COMPILER_PATH=unset + +AC_ARG_ENABLE(mpi, +[AC_HELP_STRING([--enable-mpi],[enable MPI support])], +[ + if test X${enableval} = Xno; then + HAVE_PKG_MPI=no + else + HAVE_PKG_MPI=yes + fi +] +) + +AC_ARG_WITH(mpi, +[AC_HELP_STRING([--with-mpi=MPIROOT],[the MPI root directory (above bin,lib,include), enables MPI])], +[ + if test X${withval} = Xno; then + HAVE_PKG_MPI=no + else + HAVE_PKG_MPI=yes + if test X${withval} != Xyes; then + MPI_DIR=${withval} + fi + fi +] +) + +AC_ARG_WITH(mpi-compilers, +[AC_HELP_STRING([--with-mpi-compilers={yes/no/path}],[Find MPI compilers/Don't use MPI compilers/Find MPI compilers in path])], +[ + HAVE_PKG_MPI=yes + if test X${withval} = Xno; then + SEEK_MPI_COMPILERS=no + else + SEEK_MPI_COMPILERS=yes + if test X${withval} != Xyes; then + MPI_COMPILER_PATH=${withval} + fi + fi +], +[ + if test X${HAVE_PKG_MPI} != Xno; then + SEEK_MPI_COMPILERS=yes + HAVE_PKG_MPI=yes + fi +] +) + +dnl Using MPI is the default + +if test X${HAVE_PKG_MPI} = unset ; then + HAVE_PKG_MPI=yes +fi + +if test X${SEEK_MPI_COMPILERS} = Xyes; then + + if test X${MPI_COMPILER_PATH} != Xunset ; then +# MPI_SEEK_PATH=$MPI_COMPILER_PATH$PATH_SEPARATOR$PATH + MPI_SEEK_PATH=$MPI_COMPILER_PATH + elif test -n "${MPI_DIR}" ; then +# MPI_SEEK_PATH=$MPI_DIR/bin$PATH_SEPARATOR$PATH + MPI_SEEK_PATH=$MPI_DIR/bin + else + MPI_SEEK_PATH=$PATH + fi + + dnl Find C MPI compiler if MPI_CC is not already defined + + if test -z "${MPI_CC}"; then + if test -f "${CC}"; then + + MPI_CC=${CC} + + else + + if test -n "${CC}" ; then + MPI_CC_CANDIDATE=${CC} + else + MPI_CC_CANDIDATE=mpicc + fi + + AC_PATH_PROG(MPI_CC, ${MPI_CC_CANDIDATE}, [notFound], [PATH = ${MPI_SEEK_PATH}]) + + if test "${MPI_CC}" != "notFound" ; then + CC=${MPI_CC} + else + echo "-----" + echo "Cannot find MPI C compiler in " ${MPI_SEEK_PATH} + echo "Specify a path to all mpi compilers with --with-mpi-compilers=PATH," + echo "or specify a path to top mpi directory (above bin) with --with-mpi=PATH," + echo "or specify a C compiler using CC=" + echo "or --disable-mpi" + echo "-----" + AC_MSG_ERROR([MPI C compiler not found.]) + fi + fi + fi + + if test "X$ac_cv_use_zoltan_cppdriver" = "Xyes"; then + dnl Find C++ MPI compiler if MPI_CXX is not already defined + + if test -z "${MPI_CXX}"; then + + if test -f "${CXX}"; then + + MPI_CXX=${CXX} + + else + + if test -n "${CXX}" ; then + MPI_CXX_CANDIDATES=${CXX} + else + MPI_CXX_CANDIDATES="[mpicxx mpic++ mpiCC]" + fi + + AC_PATH_PROGS(MPI_CXX, ${MPI_CXX_CANDIDATES}, [notFound], [PATH = ${MPI_SEEK_PATH}]) + + if test "${MPI_CXX}" != "notFound" ; then + CXX=${MPI_CXX} + else + echo "-----" + echo "Cannot find MPI C++ compiler in " ${MPI_SEEK_PATH} + echo "Specify a path to all mpi compilers with --with-mpi-compilers=PATH," + echo "or specify a path to top mpi directory (above bin) with --with-mpi=PATH," + echo "or specify a C++ compiler using CXX=" + echo "or --disable-mpi" + echo "-----" + AC_MSG_ERROR([MPI C++ compiler not found.]) + fi + fi + fi + fi + +# if test "X$ac_cv_use_fortran" = "Xyes"; then +# dnl Find a Fortran 77 MPI compiler if MPI_F77 is not already defined +# +# if test -z "${MPI_F77}"; then +# MPI_F77_CANDIDATE=mpif77 +# if test -n "${F77}"; then +# MPI_F77_CANDIDATE=${F77} +# fi +# +# AC_PATH_PROG(MPI_F77, ${MPI_F77_CANDIDATE}, [notFound], [PATH = ${MPI_SEEK_PATH}]) +# +# if test "${MPI_F77}" != "notFound" ; then +# F77=${MPI_F77} +# else +# echo "-----" +# echo "Cannot find MPI Fortan 77 
compiler." +# echo "Specify a path to all mpi compilers with --with-mpi-compilers=PATH," +# echo "or specify a path to top mpi directory (above bin) with --with-mpi=PATH," +# echo "or specify a fortran 77 compiler using F77=" +# echo "-----" +# AC_MSG_ERROR([MPI C compiler not found.]) +# fi +# fi +# fi + + if test "X$ac_cv_use_fortran90" = "Xyes"; then + dnl Find a Fortran 90 MPI compiler if MPI_F90 is not already defined + + if test -z "${MPI_FC}"; then + + if test -f "${FC}"; then + + MPI_FC=${FC} + + else + MPI_FC_CANDIDATES="" + + if test -n "${FC}"; then + MPI_FC_CANDIDATES=${FC} + elif test -n "${FTN}"; then + MPI_FC_CANDIDATES=${FTN} + elif test -n "${F90}"; then + MPI_FC_CANDIDATES=${F90} + fi + + if test -n "${MPI_FC_CANDIDATES}" && test -f ${MPI_FC_CANDIDATES} ; then + MPI_FC=${MPI_FC_CANDIDATES} + else + if test -z "${MPI_FC_CANDIDATES}"; then + MPI_FC_CANDIDATES="[mpif90 mpif77]" + fi + + AC_PATH_PROGS(MPI_FC, ${MPI_FC_CANDIDATES}, [notFound], [PATH = ${MPI_SEEK_PATH}]) + + if test "${MPI_FC}" != "notFound" ; then + FC=${MPI_FC} + else + echo "-----" + echo "Cannot find MPI Fortran 90 compiler in " ${MPI_SEEK_PATH} + echo "Specify a path to all mpi compilers with --with-mpi-compilers=PATH," + echo "or specify a path to top mpi directory (above bin) with --with-mpi=PATH," + echo "or specify a fortran 90 compiler using FC=" + echo "-----" + AC_MSG_ERROR([MPI C compiler not found.]) + fi + fi + fi + fi + fi +fi + +#AC_ARG_WITH(mpi-include, +#[AC_HELP_STRING([--with-mpi-include],[Obsolete. Use --with-mpi-incdir=DIR instead. Do not prefix DIR with '-I'.])], +#[AC_MSG_ERROR([--with-mpi-include is an obsolte option. Use --with-mpi-incdir=DIR instead. Do not prefix DIR with '-I'. For example '--with-mpi-incdir=/usr/lam_path/include'.])] +#) + +AC_ARG_WITH(mpi-libs, +[AC_HELP_STRING([--with-mpi-libs="LIBS"],[MPI libraries @<:@"-lmpi"@:>@])], +[ + MPI_LIBS=${withval} + AC_MSG_CHECKING(user-defined MPI libraries) + AC_MSG_RESULT([${MPI_LIBS}]) +] +) + +AC_ARG_WITH(mpi-incdir, +[AC_HELP_STRING([--with-mpi-incdir=DIR],[MPI include directory @<:@MPIROOT/include@:>@ Do not use -I])], +[ + MPI_INC=${withval} + AC_MSG_CHECKING(user-defined MPI includes) + AC_MSG_RESULT([${MPI_INC}]) +] +) + +AC_ARG_WITH(mpi-libdir, +[AC_HELP_STRING([--with-mpi-libdir=DIR],[MPI library directory @<:@MPIROOT/lib@:>@ Do not use -L])], +[ + MPI_LIBDIR=${withval} + AC_MSG_CHECKING(user-defined MPI library directory) + AC_MSG_RESULT([${MPI_LIBDIR}]) +] +) + +AC_MSG_CHECKING(whether we are using MPI) +AC_MSG_RESULT([${HAVE_PKG_MPI}]) + +if test "X${HAVE_PKG_MPI}" = "Xyes"; then + AC_DEFINE(HAVE_MPI,,[define if we want to use MPI]) +fi + +dnl Define Automake version of HAVE_MPI if appropriate + +AM_CONDITIONAL(HAVE_MPI, [test "X${HAVE_PKG_MPI}" = "Xyes"]) + +]) diff --git a/Zoltan-3.90/config/zac_arg_with_id.m4 b/Zoltan-3.90/config/zac_arg_with_id.m4 new file mode 100644 index 00000000..98a7e911 --- /dev/null +++ b/Zoltan-3.90/config/zac_arg_with_id.m4 @@ -0,0 +1,39 @@ +dnl @synopsis ZAC_ARG_WITH_ID +dnl +dnl Test for "--with-id-type=" +dnl Default is "unsigned int". Can also be "long", "llong" or "int". +dnl +dnl Generates config.h macro. 
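ZAC_ARG_WITH_ID (its body follows below) maps the --with-id-type argument onto exactly one of three config.h defines; the values the macro body actually accepts are uint (the default), ulong, and ullong. A minimal sketch of the mapping, where the configure calls are illustrative and not part of the patch:

    ./configure                          # default: UNSIGNED_INT_GLOBAL_IDS       -> typedef unsigned int ZOLTAN_ID_TYPE
    ./configure --with-id-type=ulong     # UNSIGNED_LONG_GLOBAL_IDS               -> typedef unsigned long ZOLTAN_ID_TYPE
    ./configure --with-id-type=ullong    # UNSIGNED_LONG_LONG_GLOBAL_IDS          -> typedef unsigned long long ZOLTAN_ID_TYPE
    ./configure --with-id-type=long      # rejected: only uint, ulong and ullong are valid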
+dnl +AC_DEFUN([ZAC_ARG_WITH_ID], +[ +AC_MSG_CHECKING([data type for ZOLTAN_ID_TYPE]) +zoltan_id_type="unset" +AC_ARG_WITH(id-type, +AC_HELP_STRING([--with-id-type], [Zoltan global ID type: uint (default), ulong, or ullong]), +[ +if test "X$withval" == "Xuint" ; then + AC_DEFINE([UNSIGNED_INT_GLOBAL_IDS],[1],[define if ZOLTAN_ID_TYPE is unsigned int]) + zoltan_id_type="unsigned int" +else + if test "X$withval" == "Xulong" ; then + AC_DEFINE([UNSIGNED_LONG_GLOBAL_IDS],[1],[define if ZOLTAN_ID_TYPE is unsigned long]) + zoltan_id_type="unsigned long" + else + if test "X$withval" == "Xullong" ; then + AC_DEFINE([UNSIGNED_LONG_LONG_GLOBAL_IDS],[1],[define if ZOLTAN_ID_TYPE is unsigned long long]) + zoltan_id_type="unsigned long long" + else + AC_MSG_ERROR([Valid global ID types for Zoltan are uint, ulong, and ullong]) + fi + fi +fi +], +[ +AC_DEFINE([UNSIGNED_INT_GLOBAL_IDS],[1],[define if ZOLTAN_ID_TYPE is unsigned int]) +zoltan_id_type="unsigned int" +] +) +AC_MSG_RESULT([typedef $zoltan_id_type ZOLTAN_ID_TYPE]) +] +) diff --git a/Zoltan-3.90/configure b/Zoltan-3.90/configure new file mode 100755 index 00000000..c0f33f40 --- /dev/null +++ b/Zoltan-3.90/configure @@ -0,0 +1,9598 @@ +#! /bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.68 for Zoltan 3.6. +# +# Report bugs to . +# +# +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, +# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software +# Foundation, Inc. +# +# +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. 
+if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +if test "x$CONFIG_SHELL" = x; then + as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi +" + as_required="as_fn_return () { (exit \$1); } +as_fn_success () { as_fn_return 0; } +as_fn_failure () { as_fn_return 1; } +as_fn_ret_success () { return 0; } +as_fn_ret_failure () { return 1; } + +exitcode=0 +as_fn_success || { exitcode=1; echo as_fn_success failed.; } +as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } +as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } +as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } +if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : + +else + exitcode=1; echo positional parameters were not saved. 
+fi +test x\$exitcode = x0 || exit 1" + as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO + as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO + eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && + test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1" + if (eval "$as_required") 2>/dev/null; then : + as_have_required=yes +else + as_have_required=no +fi + if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : + +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_found=false +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + as_found=: + case $as_dir in #( + /*) + for as_base in sh bash ksh sh5; do + # Try only shells that exist, to save several forks. + as_shell=$as_dir/$as_base + if { test -f "$as_shell" || test -f "$as_shell.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : + CONFIG_SHELL=$as_shell as_have_required=yes + if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : + break 2 +fi +fi + done;; + esac + as_found=false +done +$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : + CONFIG_SHELL=$SHELL as_have_required=yes +fi; } +IFS=$as_save_IFS + + + if test "x$CONFIG_SHELL" != x; then : + # We cannot yet assume a decent shell, so we have to provide a + # neutralization value for shells without unset; and this also + # works around shells that cannot unset nonexistent variables. + # Preserve -v and -x to the replacement shell. + BASH_ENV=/dev/null + ENV=/dev/null + (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV + export CONFIG_SHELL + case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; + esac + exec "$CONFIG_SHELL" $as_opts "$as_myself" ${1+"$@"} +fi + + if test x$as_have_required = xno; then : + $as_echo "$0: This script requires a shell more modern than all" + $as_echo "$0: the shells that I found on your system." + if test x${ZSH_VERSION+set} = xset ; then + $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" + $as_echo "$0: be upgraded to zsh 4.3.4 or later." + else + $as_echo "$0: Please tell bug-autoconf@gnu.org and lriesen@sandia.gov +$0: about your system, including any error possibly output +$0: before this message. Then install a modern shell, or +$0: manually run the script under such a shell if you do +$0: have one." + fi + exit 1 +fi +fi +fi +SHELL=${CONFIG_SHELL-/bin/sh} +export SHELL +# Unset more variables known to interfere with behavior of common tools. +CLICOLOR_FORCE= GREP_OPTIONS= +unset CLICOLOR_FORCE GREP_OPTIONS + +## --------------------- ## +## M4sh Shell Functions. ## +## --------------------- ## +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. 
+as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. 
+as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + + + as_lineno_1=$LINENO as_lineno_1a=$LINENO + as_lineno_2=$LINENO as_lineno_2a=$LINENO + eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && + test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { + # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } + + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -p'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -p' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -p' + fi +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +if test -x / >/dev/null 2>&1; then + as_test_x='test -x' +else + if ls -dL / >/dev/null 2>&1; then + as_ls_L_option=L + else + as_ls_L_option= + fi + as_test_x=' + eval sh -c '\'' + if test -d "$1"; then + test -d "$1/."; + else + case $1 in #( + -*)set "./$1";; + esac; + case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( + ???[sx]*):;;*)false;;esac;fi + '\'' sh + ' +fi +as_executable_p=$as_test_x + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +test -n "$DJDIR" || exec 7<&0 &1 + +# Name of the host. +# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, +# so uname gets run too. +ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` + +# +# Initializations. +# +ac_default_prefix=/usr/local +ac_clean_files= +ac_config_libobj_dir=. +LIBOBJS= +cross_compiling=no +subdirs= +MFLAGS= +MAKEFLAGS= + +# Identity of this package. 
+PACKAGE_NAME='Zoltan' +PACKAGE_TARNAME='zoltan' +PACKAGE_VERSION='3.6' +PACKAGE_STRING='Zoltan 3.6' +PACKAGE_BUGREPORT='lriesen@sandia.gov' +PACKAGE_URL='' + +ac_unique_file="src/include/zoltan.h" +ac_subst_vars='am__EXEEXT_FALSE +am__EXEEXT_TRUE +LTLIBOBJS +LIBOBJS +ac_aux_dir +NAG_F90_COMPILER_FALSE +NAG_F90_COMPILER_TRUE +F90_MODULE_LOWERCASE_FALSE +F90_MODULE_LOWERCASE_TRUE +FC_MODNAME_Q +FC_MODNAME +FC_MAJOR_VERSION +FC_VERSION +FC_VENDOR +FC_VERSION_STRING +FCFLAGS_f90 +FCFLAGS_f +FCLIBS +SUB_EXAMPLE_FALSE +SUB_EXAMPLE_TRUE +SUB_TEST_FALSE +SUB_TEST_TRUE +BUILD_EXAMPLES_FALSE +BUILD_EXAMPLES_TRUE +BUILD_TESTS_FALSE +BUILD_TESTS_TRUE +BUILD_SCOTCH_FALSE +BUILD_SCOTCH_TRUE +BUILD_PARMETIS_FALSE +BUILD_PARMETIS_TRUE +BUILD_GZIP_FALSE +BUILD_GZIP_TRUE +USING_GNUMAKE_FALSE +USING_GNUMAKE_TRUE +USING_PERL_FALSE +USING_PERL_TRUE +HAVE_PERL +PERL_EXE +USING_EXPORT_MAKEFILES_FALSE +USING_EXPORT_MAKEFILES_TRUE +CXXCPP +ALTERNATE_AR +USE_ALTERNATE_AR_FALSE +USE_ALTERNATE_AR_TRUE +RANLIB +ac_ct_FC +FCFLAGS +FC +am__fastdepCXX_FALSE +am__fastdepCXX_TRUE +CXXDEPMODE +ac_ct_CXX +CXXFLAGS +CXX +am__fastdepCC_FALSE +am__fastdepCC_TRUE +CCDEPMODE +am__nodep +AMDEPBACKSLASH +AMDEP_FALSE +AMDEP_TRUE +am__quote +am__include +DEPDIR +OBJEXT +EXEEXT +ac_ct_CC +CPPFLAGS +LDFLAGS +CFLAGS +CC +MPI_RECV_LIMIT_FLAG +HAVE_MPI_FALSE +HAVE_MPI_TRUE +MPI_FC +MPI_CXX +MPI_CC +HAVE_ZOLTAN_CPPDRIVER_FALSE +HAVE_ZOLTAN_CPPDRIVER_TRUE +BUILD_ZOLTAN_F90_INTERFACE_FALSE +BUILD_ZOLTAN_F90_INTERFACE_TRUE +am__untar +am__tar +AMTAR +am__leading_dot +SET_MAKE +AWK +mkdir_p +MKDIR_P +INSTALL_STRIP_PROGRAM +STRIP +install_sh +MAKEINFO +AUTOHEADER +AUTOMAKE +AUTOCONF +ACLOCAL +VERSION +PACKAGE +CYGPATH_W +am__isrc +INSTALL_DATA +INSTALL_SCRIPT +INSTALL_PROGRAM +HOST_SOLARIS_FALSE +HOST_SOLARIS_TRUE +HOST_LINUX_FALSE +HOST_LINUX_TRUE +HOST_CYGWIN_FALSE +HOST_CYGWIN_TRUE +target_os +target_vendor +target_cpu +target +host_os +host_vendor +host_cpu +host +build_os +build_vendor +build_cpu +build +MAINT +MAINTAINER_MODE_FALSE +MAINTAINER_MODE_TRUE +target_alias +host_alias +build_alias +LIBS +ECHO_T +ECHO_N +ECHO_C +DEFS +mandir +localedir +libdir +psdir +pdfdir +dvidir +htmldir +infodir +docdir +oldincludedir +includedir +localstatedir +sharedstatedir +sysconfdir +datadir +datarootdir +libexecdir +sbindir +bindir +program_transform_name +prefix +exec_prefix +PACKAGE_URL +PACKAGE_BUGREPORT +PACKAGE_STRING +PACKAGE_VERSION +PACKAGE_TARNAME +PACKAGE_NAME +PATH_SEPARATOR +SHELL' +ac_subst_files='' +ac_user_opts=' +enable_option_checking +with_install +enable_maintainer_mode +enable_f90interface +enable_zoltan_cppdriver +enable_mpi +with_mpi +with_mpi_compilers +with_mpi_libs +with_mpi_incdir +with_mpi_libdir +enable_mpi_recv_limit +enable_dependency_tracking +with_ccflags +with_cxxflags +with_cflags +with_fcflags +with_libs +with_ldflags +with_ar +enable_export_makefiles +with_perl +with_gnumake +with_id_type +enable_gzip +with_parmetis +with_parmetis_libdir +with_parmetis_incdir +with_scotch +with_scotch_libdir +with_scotch_incdir +with_patoh +with_patoh_libdir +with_patoh_incdir +with_nemesis_exodus +enable_tests +enable_zoltan_tests +enable_examples +enable_zoltan_examples +with_libdirs +with_incdirs +' + ac_precious_vars='build_alias +host_alias +target_alias +CC +CFLAGS +LDFLAGS +LIBS +CPPFLAGS +CXX +CXXFLAGS +CCC +FC +FCFLAGS +CXXCPP' + + +# Initialize some variables set by options. 
+ac_init_help= +ac_init_version=false +ac_unrecognized_opts= +ac_unrecognized_sep= +# The variables have the same names as the options, with +# dashes changed to underlines. +cache_file=/dev/null +exec_prefix=NONE +no_create= +no_recursion= +prefix=NONE +program_prefix=NONE +program_suffix=NONE +program_transform_name=s,x,x, +silent= +site= +srcdir= +verbose= +x_includes=NONE +x_libraries=NONE + +# Installation directory options. +# These are left unexpanded so users can "make install exec_prefix=/foo" +# and all the variables that are supposed to be based on exec_prefix +# by default will actually change. +# Use braces instead of parens because sh, perl, etc. also accept them. +# (The list follows the same order as the GNU Coding Standards.) +bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datarootdir='${prefix}/share' +datadir='${datarootdir}' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +includedir='${prefix}/include' +oldincludedir='/usr/include' +docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' +infodir='${datarootdir}/info' +htmldir='${docdir}' +dvidir='${docdir}' +pdfdir='${docdir}' +psdir='${docdir}' +libdir='${exec_prefix}/lib' +localedir='${datarootdir}/locale' +mandir='${datarootdir}/man' + +ac_prev= +ac_dashdash= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval $ac_prev=\$ac_option + ac_prev= + continue + fi + + case $ac_option in + *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; + *=) ac_optarg= ;; + *) ac_optarg=yes ;; + esac + + # Accept the important Cygnus configure options, so we can diagnose typos. + + case $ac_dashdash$ac_option in + --) + ac_dashdash=yes ;; + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=*) + datadir=$ac_optarg ;; + + -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ + | --dataroo | --dataro | --datar) + ac_prev=datarootdir ;; + -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ + | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) + datarootdir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? 
"invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=no ;; + + -docdir | --docdir | --docdi | --doc | --do) + ac_prev=docdir ;; + -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) + docdir=$ac_optarg ;; + + -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) + ac_prev=dvidir ;; + -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) + dvidir=$ac_optarg ;; + + -enable-* | --enable-*) + ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=\$ac_optarg ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. + with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) + ac_prev=htmldir ;; + -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ + | --ht=*) + htmldir=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localedir | --localedir | --localedi | --localed | --locale) + ac_prev=localedir ;; + -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) + localedir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst | --locals) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | 
--localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) + localstatedir=$ac_optarg ;; + + -mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. + with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | --program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | --program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) + ac_prev=pdfdir ;; + -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) + pdfdir=$ac_optarg ;; + + -psdir | --psdir | --psdi | --psd | --ps) + ac_prev=psdir ;; + -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) + psdir=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | 
--sha | --sh) + ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=\$ac_optarg ;; + + -without-* | --without-*) + ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=no ;; + + --x) + # Obsolete; use --with-x. + with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) as_fn_error $? "unrecognized option: \`$ac_option' +Try \`$0 --help' for more information" + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + case $ac_envvar in #( + '' | [0-9]* | *[!_$as_cr_alnum]* ) + as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; + esac + eval $ac_envvar=\$ac_optarg + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. 
+ $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + as_fn_error $? "missing argument to $ac_option" +fi + +if test -n "$ac_unrecognized_opts"; then + case $enable_option_checking in + no) ;; + fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; + *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; + esac +fi + +# Check all directory arguments for consistency. +for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ + datadir sysconfdir sharedstatedir localstatedir includedir \ + oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ + libdir localedir mandir +do + eval ac_val=\$$ac_var + # Remove trailing slashes. + case $ac_val in + */ ) + ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` + eval $ac_var=\$ac_val;; + esac + # Be sure to have absolute directory names. + case $ac_val in + [\\/$]* | ?:[\\/]* ) continue;; + NONE | '' ) case $ac_var in *prefix ) continue;; esac;; + esac + as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + $as_echo "$as_me: WARNING: if you wanted to set the --build type, don't use --host. + If a cross compiler is detected then cross compile mode will be used" >&2 + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +ac_pwd=`pwd` && test -n "$ac_pwd" && +ac_ls_di=`ls -di .` && +ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || + as_fn_error $? "working directory cannot be determined" +test "X$ac_ls_di" = "X$ac_pwd_ls_di" || + as_fn_error $? "pwd does not report name of working directory" + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then the parent directory. + ac_confdir=`$as_dirname -- "$as_myself" || +$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_myself" : 'X\(//\)[^/]' \| \ + X"$as_myself" : 'X\(//\)$' \| \ + X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_myself" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r "$srcdir/$ac_unique_file"; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r "$srcdir/$ac_unique_file"; then + test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." + as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" +fi +ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" +ac_abs_confdir=`( + cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" + pwd)` +# When building in place, set srcdir=. +if test "$ac_abs_confdir" = "$ac_pwd"; then + srcdir=. 
+fi +# Remove unnecessary trailing slashes from srcdir. +# Double slashes in file names in object file debugging info +# mess up M-x gdb in Emacs. +case $srcdir in +*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; +esac +for ac_var in $ac_precious_vars; do + eval ac_env_${ac_var}_set=\${${ac_var}+set} + eval ac_env_${ac_var}_value=\$${ac_var} + eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} + eval ac_cv_env_${ac_var}_value=\$${ac_var} +done + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. + cat <<_ACEOF +\`configure' configures Zoltan 3.6 to adapt to many kinds of systems. + +Usage: $0 [OPTION]... [VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. See below for descriptions of some of the useful variables. + +Defaults for the options are specified in brackets. + +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking ...' messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. 
+ +Fine tuning of the installation directories: + --bindir=DIR user executables [EPREFIX/bin] + --sbindir=DIR system admin executables [EPREFIX/sbin] + --libexecdir=DIR program executables [EPREFIX/libexec] + --sysconfdir=DIR read-only single-machine data [PREFIX/etc] + --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] + --localstatedir=DIR modifiable single-machine data [PREFIX/var] + --libdir=DIR object code libraries [EPREFIX/lib] + --includedir=DIR C header files [PREFIX/include] + --oldincludedir=DIR C header files for non-gcc [/usr/include] + --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] + --datadir=DIR read-only architecture-independent data [DATAROOTDIR] + --infodir=DIR info documentation [DATAROOTDIR/info] + --localedir=DIR locale-dependent data [DATAROOTDIR/locale] + --mandir=DIR man documentation [DATAROOTDIR/man] + --docdir=DIR documentation root [DATAROOTDIR/doc/zoltan] + --htmldir=DIR html documentation [DOCDIR] + --dvidir=DIR dvi documentation [DOCDIR] + --pdfdir=DIR pdf documentation [DOCDIR] + --psdir=DIR ps documentation [DOCDIR] +_ACEOF + + cat <<\_ACEOF + +Program names: + --program-prefix=PREFIX prepend PREFIX to installed program names + --program-suffix=SUFFIX append SUFFIX to installed program names + --program-transform-name=PROGRAM run sed PROGRAM on installed program names + +System types: + --build=BUILD configure for building on BUILD [guessed] + --host=HOST cross-compile to build programs to run on HOST [BUILD] + --target=TARGET configure for building compilers for TARGET [HOST] +_ACEOF +fi + +if test -n "$ac_init_help"; then + case $ac_init_help in + short | recursive ) echo "Configuration of Zoltan 3.6:";; + esac + cat <<\_ACEOF + +Optional Features: + --disable-option-checking ignore unrecognized --enable/--with options + --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) + --enable-FEATURE[=ARG] include FEATURE [ARG=yes] + --enable-maintainer-mode enable make rules and dependencies not useful + (and sometimes confusing) to the casual installer + --enable-f90interface enable Fortran 90 interface (automatically enables + Fortran support) (default is [no]) + --enable-zoltan-cppdriver + Enable Zoltan's C++ driver (default is [yes]) + --enable-mpi enable MPI support + --enable-mpi-recv-limit Set to the limit on the number of simultaneous MPI + posted receives, if any; default is + --enable-mpi-recv-limit=10 + --disable-dependency-tracking speeds up one-time build + --enable-dependency-tracking do not reject slow dependency extractors + --enable-export-makefiles + Creates export makefiles in the install (prefix) + directory. This option requires perl to be set in + your path or defined with --with-perl=. Note that the export makefiles are + always created and used in the build directory, but + will not be installable without this option to + change the paths. 
(default is yes) + --enable-gzip enable zlib support for driver (default is [no]) + --enable-tests Make tests for all Trilinos packages buildable with + 'make tests' (default is [yes]) + + --enable-zoltan-tests Make Zoltan tests buildable with 'make tests' + (default is yes if --disable-tests is not specified) + --enable-examples Make examples for all Trilinos packages buildable + with 'make examples' (default is [yes]) + --enable-zoltan-examples + Make Zoltan examples buildable with 'make examples' + (default is yes if --disable-examples is not + specified) + +Optional Packages: + --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] + --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) + --with-install=INSTALL_PROGRAM + Use the installation program INSTALL_PROGRAM rather + the default that is provided. For example + --with-install="/path/install -p" + --with-mpi=MPIROOT the MPI root directory (above bin,lib,include), + enables MPI + --with-mpi-compilers={yes/no/path} + Find MPI compilers/Don't use MPI compilers/Find MPI + compilers in path + --with-mpi-libs="LIBS" MPI libraries ["-lmpi"] + --with-mpi-incdir=DIR MPI include directory [MPIROOT/include] Do not use + -I + --with-mpi-libdir=DIR MPI library directory [MPIROOT/lib] Do not use -L + --with-ccflags additional [CCFLAGS] flags to be added: will prepend + to [CCFLAGS] + --with-cxxflags additional [CXXFLAGS] flags to be added: will + prepend to [CXXFLAGS] + --with-cflags additional [CFLAGS] flags to be added: will prepend + to [CFLAGS] + --with-fcflags additional [FCFLAGS] flags to be added: will prepend + to [FCFLAGS] + --with-libs List additional libraries here. For example, + --with-libs=-lsuperlu or + --with-libs=/path/libsuperlu.a + --with-ldflags additional [LDFLAGS] flags to be added: will prepend + to [LDFLAGS] + --with-ar override archiver command (default is "ar cru") + --with-perl supply a perl executable. For example + --with-perl=/usr/bin/perl. + --with-gnumake Gnu's make has special functions we can use to + eliminate redundant paths in the build and link + lines. Enable this if you use gnu-make to build + Trilinos. This requires that perl is in your path or + that you have specified the perl executable with + --with-perl=. Configure will check + for the existence of the perl executable and quit + with an error if it is not found. (default is no) + --with-id-type Zoltan global ID type: uint (default), ulong, or + ullong + --with-parmetis Enable Parmetis support. (default is [no]) + --with-parmetis-libdir Specify where the Parmetis library is located. Ex. + /path/to/library + --with-parmetis-incdir Specify where the Parmetis header files are located. + Ex. /path/to/headers + --with-scotch Enable Scotch support. (default is [no]) + --with-scotch-libdir Specify where the Scotch library is located. Ex. + /path/to/library + --with-scotch-incdir Specify where the Scotch header files are located. + Ex. /path/to/headers + --with-patoh Enable Patoh support. (default is [no]) + --with-patoh-libdir Specify where the Patoh library is located. Ex. + /path/to/library + --with-patoh-incdir Specify where the Patoh header files are located. + Ex. /path/to/headers + --with-nemesis_exodus Enable Nemesis/Exodus support for the Zdrive test + executable. Library and include paths must be + specified using LDFLAGS and CFLAGS. (default is + [no]) + --with-libdirs OBSOLETE use --with-ldflags instead. (ex. 
+ --with-ldflags="-L -L") + --with-incdirs additional directories containing include files: + will prepend to search here for includes, use -Idir + format + +Some influential environment variables: + CC C compiler command + CFLAGS C compiler flags + LDFLAGS linker flags, e.g. -L if you have libraries in a + nonstandard directory + LIBS libraries to pass to the linker, e.g. -l + CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if + you have headers in a nonstandard directory + CXX C++ compiler command + CXXFLAGS C++ compiler flags + FC Fortran compiler command + FCFLAGS Fortran compiler flags + CXXCPP C++ preprocessor + +Use these variables to override the choices made by `configure' or to help +it to find libraries and programs with nonstandard names/locations. + +Report bugs to . +_ACEOF +ac_status=$? +fi + +if test "$ac_init_help" = "recursive"; then + # If there are subdirs, report their specific --help. + for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue + test -d "$ac_dir" || + { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || + continue + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + cd "$ac_dir" || { ac_status=$?; continue; } + # Check for guested configure. + if test -f "$ac_srcdir/configure.gnu"; then + echo && + $SHELL "$ac_srcdir/configure.gnu" --help=recursive + elif test -f "$ac_srcdir/configure"; then + echo && + $SHELL "$ac_srcdir/configure" --help=recursive + else + $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + fi || ac_status=$? + cd "$ac_pwd" || { ac_status=$?; break; } + done +fi + +test -n "$ac_init_help" && exit $ac_status +if $ac_init_version; then + cat <<\_ACEOF +Zoltan configure 3.6 +generated by GNU Autoconf 2.68 + +Copyright (C) 2010 Free Software Foundation, Inc. +This configure script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it. +_ACEOF + exit +fi + +## ------------------------ ## +## Autoconf initialization. ## +## ------------------------ ## + +# ac_fn_c_try_compile LINENO +# -------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. 
+ac_fn_c_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_compile + +# ac_fn_cxx_try_compile LINENO +# ---------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. +ac_fn_cxx_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_compile + +# ac_fn_fc_try_compile LINENO +# --------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. +ac_fn_fc_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_fc_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_fc_try_compile + +# ac_fn_cxx_try_cpp LINENO +# ------------------------ +# Try to preprocess conftest.$ac_ext, and return whether this succeeded. 
+ac_fn_cxx_try_cpp () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } > conftest.i && { + test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || + test ! -s conftest.err + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_cpp + +# ac_fn_c_try_link LINENO +# ----------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_link () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest$ac_exeext + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_link +cat >config.log <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by Zoltan $as_me 3.6, which was +generated by GNU Autoconf 2.68. Invocation command line was + + $ $0 $@ + +_ACEOF +exec 5>>config.log +{ +cat <<_ASUNAME +## --------- ## +## Platform. 
## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + $as_echo "PATH: $as_dir" + done +IFS=$as_save_IFS + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. +ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *\'*) + ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; + 2) + as_fn_append ac_configure_args1 " '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. + else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + as_fn_append ac_configure_args " '$ac_arg'" + ;; + esac + done +done +{ ac_configure_args0=; unset ac_configure_args0;} +{ ac_configure_args1=; unset ac_configure_args1;} + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Use '\'' to represent an apostrophe within the trap. +# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. +trap 'exit_status=$? + # Save into config.log some information that might help in debugging. + { + echo + + $as_echo "## ---------------- ## +## Cache variables. 
## +## ---------------- ##" + echo + # The following way of writing the cache mishandles newlines in values, +( + for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + (set) 2>&1 | + case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + sed -n \ + "s/'\''/'\''\\\\'\'''\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" + ;; #( + *) + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) + echo + + $as_echo "## ----------------- ## +## Output variables. ## +## ----------------- ##" + echo + for ac_var in $ac_subst_vars + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + + if test -n "$ac_subst_files"; then + $as_echo "## ------------------- ## +## File substitutions. ## +## ------------------- ##" + echo + for ac_var in $ac_subst_files + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + fi + + if test -s confdefs.h; then + $as_echo "## ----------- ## +## confdefs.h. ## +## ----------- ##" + echo + cat confdefs.h + echo + fi + test "$ac_signal" != 0 && + $as_echo "$as_me: caught signal $ac_signal" + $as_echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core core.conftest.* && + rm -f -r conftest* confdefs* conf$$* $ac_clean_files && + exit $exit_status +' 0 +for ac_signal in 1 2 13 15; do + trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal +done +ac_signal=0 + +# confdefs.h avoids OS command line length limits that DEFS can exceed. +rm -f -r conftest* confdefs.h + +$as_echo "/* confdefs.h */" > confdefs.h + +# Predefined preprocessor variables. + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_NAME "$PACKAGE_NAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_TARNAME "$PACKAGE_TARNAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_VERSION "$PACKAGE_VERSION" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_STRING "$PACKAGE_STRING" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_URL "$PACKAGE_URL" +_ACEOF + + +# Let the site file select an alternate cache file if it wants to. +# Prefer an explicitly selected file to automatically selected ones. +ac_site_file1=NONE +ac_site_file2=NONE +if test -n "$CONFIG_SITE"; then + # We do not want a PATH search for config.site. 
+ case $CONFIG_SITE in #(( + -*) ac_site_file1=./$CONFIG_SITE;; + */*) ac_site_file1=$CONFIG_SITE;; + *) ac_site_file1=./$CONFIG_SITE;; + esac +elif test "x$prefix" != xNONE; then + ac_site_file1=$prefix/share/config.site + ac_site_file2=$prefix/etc/config.site +else + ac_site_file1=$ac_default_prefix/share/config.site + ac_site_file2=$ac_default_prefix/etc/config.site +fi +for ac_site_file in "$ac_site_file1" "$ac_site_file2" +do + test "x$ac_site_file" = xNONE && continue + if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 +$as_echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" \ + || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "failed to load site script $ac_site_file +See \`config.log' for more details" "$LINENO" 5; } + fi +done + +if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special files + # actually), so we avoid doing that. DJGPP emulates it as a regular file. + if test /dev/null != "$cache_file" && test -f "$cache_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 +$as_echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . "$cache_file";; + *) . "./$cache_file";; + esac + fi +else + { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 +$as_echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file +fi + +# Check that the precious variables saved in the cache have kept the same +# value. +ac_cache_corrupted=false +for ac_var in $ac_precious_vars; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val=\$ac_cv_env_${ac_var}_value + eval ac_new_val=\$ac_env_${ac_var}_value + case $ac_old_set,$ac_new_set in + set,) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + # differences in whitespace do not lead to failure. + ac_old_val_w=`echo x $ac_old_val` + ac_new_val_w=`echo x $ac_new_val` + if test "$ac_old_val_w" != "$ac_new_val_w"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 +$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + ac_cache_corrupted=: + else + { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 +$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} + eval $ac_var=\$ac_old_val + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 +$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 +$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} + fi;; + esac + # Pass precious variables to config.status. 
+ if test "$ac_new_set" = set; then + case $ac_new_val in + *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. + *) as_fn_append ac_configure_args " '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 +$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} + as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 +fi +## -------------------- ## +## Main body of script. ## +## -------------------- ## + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + +echo "----------------------------------------" +echo "Running Zoltan Configure Script" +echo "----------------------------------------" + +# This is to protect against accidentally specifying the wrong +# directory with --srcdir. Any file in that directory will do, +# preferably one that is unlikely to be removed or renamed. + + + +# Specify directory for auxillary build tools (e.g., install-sh, +# config.sub, config.guess) and M4 files. + +ac_aux_dir= +for ac_dir in config "$srcdir"/config; do + if test -f "$ac_dir/install-sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install-sh -c" + break + elif test -f "$ac_dir/install.sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install.sh -c" + break + elif test -f "$ac_dir/shtool"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi +done +if test -z "$ac_aux_dir"; then + as_fn_error $? "cannot find install-sh, install.sh, or shtool in config \"$srcdir\"/config" "$LINENO" 5 +fi + +# These three variables are undocumented and unsupported, +# and are intended to be withdrawn in a future Autoconf release. +# They can cause serious problems if a builder's source tree is in a directory +# whose full name contains unusual characters. +ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. +ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. +ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. + + + +# +# We don't want people to configure in the source directory. Some +# things may break. +# + +if test -e configure.ac ; then + echo "You are trying to run configure in the source directory. This is not allowed. Please run configure from a separate build directory." + exit +fi + +# Configure should create src/Zoltan_config.h from src/Zoltan_config.h.in +ac_config_headers="$ac_config_headers src/include/Zoltan_config.h:src/include/Zoltan_config.h.in" + + +# Allow users to specify their own "install" command. If none is specified, +# the default is install-sh found in the config subdirectory. + + +# Check whether --with-install was given. 
+if test "${with_install+set}" = set; then : + withval=$with_install; + INSTALL=$withval + INSTALL_PROGRAM=$withval + INSTALL_SCRIPT=$withval + INSTALL_DATA="$withval -m 644" + +fi + + +# AM_MAINTAINER_MODE turns off maintainer-only makefile targets by +# default, and changes configure to understand a +# --enable-maintainer-mode option. --enable-maintainer-mode turns the +# maintainer-only targets back on. The maintainer-only makefile +# targets permit end users to clean automatically-generated files such +# as configure, which means they have to have autoconf and automake +# installed to repair the damage. AM_MAINTAINER_MODE makes it a bit +# harder for users to shoot themselves in the foot. + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5 +$as_echo_n "checking whether to enable maintainer-specific portions of Makefiles... " >&6; } + # Check whether --enable-maintainer-mode was given. +if test "${enable_maintainer_mode+set}" = set; then : + enableval=$enable_maintainer_mode; USE_MAINTAINER_MODE=$enableval +else + USE_MAINTAINER_MODE=no +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_MAINTAINER_MODE" >&5 +$as_echo "$USE_MAINTAINER_MODE" >&6; } + if test $USE_MAINTAINER_MODE = yes; then + MAINTAINER_MODE_TRUE= + MAINTAINER_MODE_FALSE='#' +else + MAINTAINER_MODE_TRUE='#' + MAINTAINER_MODE_FALSE= +fi + + MAINT=$MAINTAINER_MODE_TRUE + + + +# Define $build, $host, $target, etc + +# Make sure we can run config.sub. +$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || + as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 +$as_echo_n "checking build system type... " >&6; } +if ${ac_cv_build+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_build_alias=$build_alias +test "x$ac_build_alias" = x && + ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` +test "x$ac_build_alias" = x && + as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 +ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 +$as_echo "$ac_cv_build" >&6; } +case $ac_cv_build in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; +esac +build=$ac_cv_build +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_build +shift +build_cpu=$1 +build_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +build_os=$* +IFS=$ac_save_IFS +case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 +$as_echo_n "checking host system type... " >&6; } +if ${ac_cv_host+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "x$host_alias" = x; then + ac_cv_host=$ac_cv_build +else + ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 +$as_echo "$ac_cv_host" >&6; } +case $ac_cv_host in +*-*-*) ;; +*) as_fn_error $? 
"invalid value of canonical host" "$LINENO" 5;; +esac +host=$ac_cv_host +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_host +shift +host_cpu=$1 +host_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +host_os=$* +IFS=$ac_save_IFS +case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking target system type" >&5 +$as_echo_n "checking target system type... " >&6; } +if ${ac_cv_target+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "x$target_alias" = x; then + ac_cv_target=$ac_cv_host +else + ac_cv_target=`$SHELL "$ac_aux_dir/config.sub" $target_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $target_alias failed" "$LINENO" 5 +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_target" >&5 +$as_echo "$ac_cv_target" >&6; } +case $ac_cv_target in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical target" "$LINENO" 5;; +esac +target=$ac_cv_target +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_target +shift +target_cpu=$1 +target_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +target_os=$* +IFS=$ac_save_IFS +case $target_os in *\ *) target_os=`echo "$target_os" | sed 's/ /-/g'`;; esac + + +# The aliases save the names the user supplied, while $host etc. +# will get canonicalized. +test -n "$target_alias" && + test "$program_prefix$program_suffix$program_transform_name" = \ + NONENONEs,x,x, && + program_prefix=${target_alias}- + +# Note in header file and Makefile conditional what the host OS is + + if false; then + HOST_CYGWIN_TRUE= + HOST_CYGWIN_FALSE='#' +else + HOST_CYGWIN_TRUE='#' + HOST_CYGWIN_FALSE= +fi + + if false; then + HOST_LINUX_TRUE= + HOST_LINUX_FALSE='#' +else + HOST_LINUX_TRUE='#' + HOST_LINUX_FALSE= +fi + + if false; then + HOST_SOLARIS_TRUE= + HOST_SOLARIS_FALSE='#' +else + HOST_SOLARIS_TRUE='#' + HOST_SOLARIS_FALSE= +fi + + +case $host_os in + cygwin) + if true; then + HOST_CYGWIN_TRUE= + HOST_CYGWIN_FALSE='#' +else + HOST_CYGWIN_TRUE='#' + HOST_CYGWIN_FALSE= +fi + + +$as_echo "#define HOST_CYGWIN 1" >>confdefs.h + + ;; + linux*) + if true; then + HOST_LINUX_TRUE= + HOST_LINUX_FALSE='#' +else + HOST_LINUX_TRUE='#' + HOST_LINUX_FALSE= +fi + + +$as_echo "#define HOST_LINUX 1" >>confdefs.h + + ;; + solaris*) + if true; then + HOST_SOLARIS_TRUE= + HOST_SOLARIS_FALSE='#' +else + HOST_SOLARIS_TRUE='#' + HOST_SOLARIS_FALSE= +fi + + +$as_echo "#define HOST_SOLARIS 1" >>confdefs.h + + ;; +esac + +# Use automake + +# - Required version of automake. +am__api_version='1.11' + +# Find a good install program. We prefer a C program (faster), +# so one script is as good as another. But avoid the broken or +# incompatible versions: +# SysV /etc/install, /usr/sbin/install +# SunOS /usr/etc/install +# IRIX /sbin/install +# AIX /bin/install +# AmigaOS /C/install, which installs bootblocks on floppy discs +# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag +# AFS /usr/afsws/bin/install, which mishandles nonexistent args +# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic +# ./install, which can be erroneously created by make from ./install.sh. +# Reject install programs that cannot install multiple files. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 +$as_echo_n "checking for a BSD-compatible install... 
" >&6; } +if test -z "$INSTALL"; then +if ${ac_cv_path_install+:} false; then : + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + # Account for people who put trailing slashes in PATH elements. +case $as_dir/ in #(( + ./ | .// | /[cC]/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ + /usr/ucb/* ) ;; + *) + # OSF1 and SCO ODT 3.0 have their own names for install. + # Don't use installbsd from OSF since it installs stuff as root + # by default. + for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then + if test $ac_prog = install && + grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. + : + elif test $ac_prog = install && + grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. + : + else + rm -rf conftest.one conftest.two conftest.dir + echo one > conftest.one + echo two > conftest.two + mkdir conftest.dir + if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && + test -s conftest.one && test -s conftest.two && + test -s conftest.dir/conftest.one && + test -s conftest.dir/conftest.two + then + ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + break 3 + fi + fi + fi + done + done + ;; +esac + + done +IFS=$as_save_IFS + +rm -rf conftest.one conftest.two conftest.dir + +fi + if test "${ac_cv_path_install+set}" = set; then + INSTALL=$ac_cv_path_install + else + # As a last resort, use the slow shell script. Don't cache a + # value for INSTALL within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + INSTALL=$ac_install_sh + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 +$as_echo "$INSTALL" >&6; } + +# Use test -z because SunOS4 sh mishandles braces in ${var-val}. +# It thinks the first close brace ends the variable substitution. +test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' + +test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' + +test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 +$as_echo_n "checking whether build environment is sane... " >&6; } +# Just in case +sleep 1 +echo timestamp > conftest.file +# Reject unsafe characters in $srcdir or the absolute working directory +# name. Accept space and tab only in the latter. +am_lf=' +' +case `pwd` in + *[\\\"\#\$\&\'\`$am_lf]*) + as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;; +esac +case $srcdir in + *[\\\"\#\$\&\'\`$am_lf\ \ ]*) + as_fn_error $? "unsafe srcdir value: \`$srcdir'" "$LINENO" 5;; +esac + +# Do `set' in a subshell so we don't clobber the current shell's +# arguments. Must try -L first in case configure is actually a +# symlink; some systems play weird games with the mod time of symlinks +# (eg FreeBSD returns the mod time of the symlink's containing +# directory). +if ( + set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` + if test "$*" = "X"; then + # -L didn't work. 
+ set X `ls -t "$srcdir/configure" conftest.file` + fi + rm -f conftest.file + if test "$*" != "X $srcdir/configure conftest.file" \ + && test "$*" != "X conftest.file $srcdir/configure"; then + + # If neither matched, then we have a broken ls. This can happen + # if, for instance, CONFIG_SHELL is bash and it inherits a + # broken ls alias from the environment. This has actually + # happened. Such a system could not be considered "sane". + as_fn_error $? "ls -t appears to fail. Make sure there is not a broken +alias in your environment" "$LINENO" 5 + fi + + test "$2" = conftest.file + ) +then + # Ok. + : +else + as_fn_error $? "newly created file is older than distributed files! +Check your system clock" "$LINENO" 5 +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +test "$program_prefix" != NONE && + program_transform_name="s&^&$program_prefix&;$program_transform_name" +# Use a double $ so make ignores it. +test "$program_suffix" != NONE && + program_transform_name="s&\$&$program_suffix&;$program_transform_name" +# Double any \ or $. +# By default was `s,x,x', remove it if useless. +ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' +program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"` + +# expand $ac_aux_dir to an absolute path +am_aux_dir=`cd $ac_aux_dir && pwd` + +if test x"${MISSING+set}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; + *) + MISSING="\${SHELL} $am_aux_dir/missing" ;; + esac +fi +# Use eval to expand $SHELL +if eval "$MISSING --run true"; then + am_missing_run="$MISSING --run " +else + am_missing_run= + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`missing' script is too old or missing" >&5 +$as_echo "$as_me: WARNING: \`missing' script is too old or missing" >&2;} +fi + +if test x"${install_sh}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; + *) + install_sh="\${SHELL} $am_aux_dir/install-sh" + esac +fi + +# Installed binaries are usually stripped using `strip' when the user +# run `make install-strip'. However `strip' might not be the right +# tool to use in cross-compilation environments, therefore Automake +# will honor the `STRIP' environment variable to overrule this program. +if test "$cross_compiling" != no; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 +$as_echo "$STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_STRIP="strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 +$as_echo "$ac_ct_STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_STRIP" = x; then + STRIP=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + STRIP=$ac_ct_STRIP + fi +else + STRIP="$ac_cv_prog_STRIP" +fi + +fi +INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5 +$as_echo_n "checking for a thread-safe mkdir -p... " >&6; } +if test -z "$MKDIR_P"; then + if ${ac_cv_path_mkdir+:} false; then : + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in mkdir gmkdir; do + for ac_exec_ext in '' $ac_executable_extensions; do + { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; } || continue + case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( + 'mkdir (GNU coreutils) '* | \ + 'mkdir (coreutils) '* | \ + 'mkdir (fileutils) '4.1*) + ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext + break 3;; + esac + done + done + done +IFS=$as_save_IFS + +fi + + test -d ./--version && rmdir ./--version + if test "${ac_cv_path_mkdir+set}" = set; then + MKDIR_P="$ac_cv_path_mkdir -p" + else + # As a last resort, use the slow shell script. Don't cache a + # value for MKDIR_P within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. 
+ MKDIR_P="$ac_install_sh -d" + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 +$as_echo "$MKDIR_P" >&6; } + +mkdir_p="$MKDIR_P" +case $mkdir_p in + [\\/$]* | ?:[\\/]*) ;; + */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;; +esac + +for ac_prog in gawk mawk nawk awk +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AWK+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AWK"; then + ac_cv_prog_AWK="$AWK" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_AWK="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AWK=$ac_cv_prog_AWK +if test -n "$AWK"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 +$as_echo "$AWK" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$AWK" && break +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 +$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } +set x ${MAKE-make} +ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` +if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat >conftest.make <<\_ACEOF +SHELL = /bin/sh +all: + @echo '@@@%%%=$(MAKE)=@@@%%%' +_ACEOF +# GNU make sometimes prints "make[1]: Entering ...", which would confuse us. +case `${MAKE-make} -f conftest.make 2>/dev/null` in + *@@@%%%=?*=@@@%%%*) + eval ac_cv_prog_make_${ac_make}_set=yes;; + *) + eval ac_cv_prog_make_${ac_make}_set=no;; +esac +rm -f conftest.make +fi +if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + SET_MAKE= +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + SET_MAKE="MAKE=${MAKE-make}" +fi + +rm -rf .tst 2>/dev/null +mkdir .tst 2>/dev/null +if test -d .tst; then + am__leading_dot=. +else + am__leading_dot=_ +fi +rmdir .tst 2>/dev/null + +if test "`cd $srcdir && pwd`" != "`pwd`"; then + # Use -I$(srcdir) only when $(srcdir) != ., so that make's output + # is not polluted with repeated "-I." + am__isrc=' -I$(srcdir)' + # test to see if srcdir already configured + if test -f $srcdir/config.status; then + as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5 + fi +fi + +# test whether we have cygpath +if test -z "$CYGPATH_W"; then + if (cygpath --version) >/dev/null 2>/dev/null; then + CYGPATH_W='cygpath -w' + else + CYGPATH_W=echo + fi +fi + + +# Define the identity of the package. + PACKAGE='zoltan' + VERSION='3.6' + + +# Some tools Automake needs. + +ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} + + +AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} + + +AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} + + +AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} + + +MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} + +# We need awk for the "check" target. 
The system "awk" is bad on
+# some platforms.
+# Always define AMTAR for backward compatibility. Yes, it's still used
+# in the wild :-( We should find a proper way to deprecate it ...
+AMTAR='$${TAR-tar}'
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to create a ustar tar archive" >&5
+$as_echo_n "checking how to create a ustar tar archive... " >&6; }
+# Loop over all known methods to create a tar archive until one works.
+_am_tools='gnutar plaintar pax cpio none'
+_am_tools=${am_cv_prog_tar_ustar-$_am_tools}
+# Do not fold the above two line into one, because Tru64 sh and
+# Solaris sh will not grok spaces in the rhs of `-'.
+for _am_tool in $_am_tools
+do
+ case $_am_tool in
+ gnutar)
+ for _am_tar in tar gnutar gtar;
+ do
+ { echo "$as_me:$LINENO: $_am_tar --version" >&5
+ ($_am_tar --version) >&5 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && break
+ done
+ am__tar="$_am_tar --format=ustar -chf - "'"$$tardir"'
+ am__tar_="$_am_tar --format=ustar -chf - "'"$tardir"'
+ am__untar="$_am_tar -xf -"
+ ;;
+ plaintar)
+ # Must skip GNU tar: if it does not support --format= it doesn't create
+ # ustar tarball either.
+ (tar --version) >/dev/null 2>&1 && continue
+ am__tar='tar chf - "$$tardir"'
+ am__tar_='tar chf - "$tardir"'
+ am__untar='tar xf -'
+ ;;
+ pax)
+ am__tar='pax -L -x ustar -w "$$tardir"'
+ am__tar_='pax -L -x ustar -w "$tardir"'
+ am__untar='pax -r'
+ ;;
+ cpio)
+ am__tar='find "$$tardir" -print | cpio -o -H ustar -L'
+ am__tar_='find "$tardir" -print | cpio -o -H ustar -L'
+ am__untar='cpio -i -H ustar -d'
+ ;;
+ none)
+ am__tar=false
+ am__tar_=false
+ am__untar=false
+ ;;
+ esac
+
+ # If the value was cached, stop now. We just wanted to have am__tar
+ # and am__untar set.
+ test -n "${am_cv_prog_tar_ustar}" && break
+
+ # tar/untar a dummy directory, and stop if the command works
+ rm -rf conftest.dir
+ mkdir conftest.dir
+ echo GrepMe > conftest.dir/file
+ { echo "$as_me:$LINENO: tardir=conftest.dir && eval $am__tar_ >conftest.tar" >&5
+ (tardir=conftest.dir && eval $am__tar_ >conftest.tar) >&5 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+ rm -rf conftest.dir
+ if test -s conftest.tar; then
+ { echo "$as_me:$LINENO: $am__untar <conftest.tar" >&5
+ ($am__untar <conftest.tar) >&5 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+ grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
+ fi
+done
+rm -rf conftest.dir
+
+if ${am_cv_prog_tar_ustar+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ am_cv_prog_tar_ustar=$_am_tool
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_tar_ustar" >&5
+$as_echo "$am_cv_prog_tar_ustar" >&6; }
+
+
+
+
+
+
+# Specify required version of autoconf.
+
+
+
+#TAC_ARG_ENABLE_OPTION(fortran, [enable Fortran support], FORTRAN_SUPPORT, no)
+#This option is not currently available
+
+# Check whether --enable-f90interface was given.
+if test "${enable_f90interface+set}" = set; then :
+ enableval=$enable_f90interface; ac_cv_use_f90interface=$enableval
+else
+ ac_cv_use_f90interface=no
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use f90interface" >&5
+$as_echo_n "checking whether to use f90interface... 
" >&6; } + +if test "X$ac_cv_use_f90interface" != "Xno"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +$as_echo "#define HAVE_F90INTERFACE 1" >>confdefs.h + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "X$ac_cv_use_f90interface" != "Xno"; then + BUILD_ZOLTAN_F90_INTERFACE_TRUE= + BUILD_ZOLTAN_F90_INTERFACE_FALSE='#' +else + BUILD_ZOLTAN_F90_INTERFACE_TRUE='#' + BUILD_ZOLTAN_F90_INTERFACE_FALSE= +fi + +#AM_CONDITIONAL(USE_FORTRAN, [test "X$ac_cv_use_fortran" != "Xno"]) + +if test "X$ac_cv_use_f90interface" = "Xyes"; then + ac_cv_use_fortran=yes +else + ac_cv_use_fortran=no +fi + +if test "X$ac_cv_use_fortran" = "Xyes"; then + ac_cv_use_fortran90=yes +# AX_F90_MODULE_FLAG +# if test "X$ax_cv_f90_modflag" = "Xunknown" ; then +# AC_MSG_ERROR([unable to find f90 modules extension]) +# else +# FCFLAGS="$ax_cv_f90_modflag../ $ax_cv_f90_modflag. ${FCFLAGS}" +# fi +else + ac_cv_use_fortran90=no +fi + + +# Check whether --enable-zoltan-cppdriver was given. +if test "${enable_zoltan_cppdriver+set}" = set; then : + enableval=$enable_zoltan_cppdriver; ac_cv_use_zoltan_cppdriver=$enableval +else + ac_cv_use_zoltan_cppdriver=yes +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use zoltan-cppdriver" >&5 +$as_echo_n "checking whether to use zoltan-cppdriver... " >&6; } + +if test "X$ac_cv_use_zoltan_cppdriver" != "Xno"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +$as_echo "#define HAVE_ZOLTAN_CPPDRIVER /**/" >>confdefs.h + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "X$ac_cv_use_zoltan_cppdriver" != "Xno"; then + HAVE_ZOLTAN_CPPDRIVER_TRUE= + HAVE_ZOLTAN_CPPDRIVER_FALSE='#' +else + HAVE_ZOLTAN_CPPDRIVER_TRUE='#' + HAVE_ZOLTAN_CPPDRIVER_FALSE= +fi + + +#This can be removed after we retire the old build system +#AC_DEFINE([TRILINOS_CONFIG_H],,[Define when using the autotools to build Zoltan]) + +# ------------------------------------------------------------------------ +# Check to see if MPI enabled and if any special configuration done +# ------------------------------------------------------------------------ + +# We may want to handle this differently because Zoltan requires MPI +#TAC_ARG_CONFIG_MPI + + + +HAVE_PKG_MPI=unset +SEEK_MPI_COMPILERS=unset +MPI_COMPILER_PATH=unset + +# Check whether --enable-mpi was given. +if test "${enable_mpi+set}" = set; then : + enableval=$enable_mpi; + if test X${enableval} = Xno; then + HAVE_PKG_MPI=no + else + HAVE_PKG_MPI=yes + fi + + +fi + + + +# Check whether --with-mpi was given. +if test "${with_mpi+set}" = set; then : + withval=$with_mpi; + if test X${withval} = Xno; then + HAVE_PKG_MPI=no + else + HAVE_PKG_MPI=yes + if test X${withval} != Xyes; then + MPI_DIR=${withval} + fi + fi + + +fi + + + +# Check whether --with-mpi-compilers was given. 
+if test "${with_mpi_compilers+set}" = set; then : + withval=$with_mpi_compilers; + HAVE_PKG_MPI=yes + if test X${withval} = Xno; then + SEEK_MPI_COMPILERS=no + else + SEEK_MPI_COMPILERS=yes + if test X${withval} != Xyes; then + MPI_COMPILER_PATH=${withval} + fi + fi + +else + + if test X${HAVE_PKG_MPI} != Xno; then + SEEK_MPI_COMPILERS=yes + HAVE_PKG_MPI=yes + fi + + +fi + + + +if test X${HAVE_PKG_MPI} = unset ; then + HAVE_PKG_MPI=yes +fi + +if test X${SEEK_MPI_COMPILERS} = Xyes; then + + if test X${MPI_COMPILER_PATH} != Xunset ; then +# MPI_SEEK_PATH=$MPI_COMPILER_PATH$PATH_SEPARATOR$PATH + MPI_SEEK_PATH=$MPI_COMPILER_PATH + elif test -n "${MPI_DIR}" ; then +# MPI_SEEK_PATH=$MPI_DIR/bin$PATH_SEPARATOR$PATH + MPI_SEEK_PATH=$MPI_DIR/bin + else + MPI_SEEK_PATH=$PATH + fi + + + if test -z "${MPI_CC}"; then + if test -f "${CC}"; then + + MPI_CC=${CC} + + else + + if test -n "${CC}" ; then + MPI_CC_CANDIDATE=${CC} + else + MPI_CC_CANDIDATE=mpicc + fi + + # Extract the first word of "${MPI_CC_CANDIDATE}", so it can be a program name with args. +set dummy ${MPI_CC_CANDIDATE}; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_MPI_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MPI_CC in + [\\/]* | ?:[\\/]*) + ac_cv_path_MPI_CC="$MPI_CC" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in PATH = ${MPI_SEEK_PATH} +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_path_MPI_CC="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_MPI_CC" && ac_cv_path_MPI_CC="notFound" + ;; +esac +fi +MPI_CC=$ac_cv_path_MPI_CC +if test -n "$MPI_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MPI_CC" >&5 +$as_echo "$MPI_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + + if test "${MPI_CC}" != "notFound" ; then + CC=${MPI_CC} + else + echo "-----" + echo "Cannot find MPI C compiler in " ${MPI_SEEK_PATH} + echo "Specify a path to all mpi compilers with --with-mpi-compilers=PATH," + echo "or specify a path to top mpi directory (above bin) with --with-mpi=PATH," + echo "or specify a C compiler using CC=" + echo "or --disable-mpi" + echo "-----" + as_fn_error $? "MPI C compiler not found." "$LINENO" 5 + fi + fi + fi + + if test "X$ac_cv_use_zoltan_cppdriver" = "Xyes"; then + + if test -z "${MPI_CXX}"; then + + if test -f "${CXX}"; then + + MPI_CXX=${CXX} + + else + + if test -n "${CXX}" ; then + MPI_CXX_CANDIDATES=${CXX} + else + MPI_CXX_CANDIDATES="mpicxx mpic++ mpiCC" + fi + + for ac_prog in ${MPI_CXX_CANDIDATES} +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_MPI_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MPI_CXX in + [\\/]* | ?:[\\/]*) + ac_cv_path_MPI_CXX="$MPI_CXX" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in PATH = ${MPI_SEEK_PATH} +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_path_MPI_CXX="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +MPI_CXX=$ac_cv_path_MPI_CXX +if test -n "$MPI_CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MPI_CXX" >&5 +$as_echo "$MPI_CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$MPI_CXX" && break +done +test -n "$MPI_CXX" || MPI_CXX="notFound" + + + if test "${MPI_CXX}" != "notFound" ; then + CXX=${MPI_CXX} + else + echo "-----" + echo "Cannot find MPI C++ compiler in " ${MPI_SEEK_PATH} + echo "Specify a path to all mpi compilers with --with-mpi-compilers=PATH," + echo "or specify a path to top mpi directory (above bin) with --with-mpi=PATH," + echo "or specify a C++ compiler using CXX=" + echo "or --disable-mpi" + echo "-----" + as_fn_error $? "MPI C++ compiler not found." "$LINENO" 5 + fi + fi + fi + fi + +# if test "X$ac_cv_use_fortran" = "Xyes"; then +# dnl Find a Fortran 77 MPI compiler if MPI_F77 is not already defined +# +# if test -z "${MPI_F77}"; then +# MPI_F77_CANDIDATE=mpif77 +# if test -n "${F77}"; then +# MPI_F77_CANDIDATE=${F77} +# fi +# +# AC_PATH_PROG(MPI_F77, ${MPI_F77_CANDIDATE}, [notFound], [PATH = ${MPI_SEEK_PATH}]) +# +# if test "${MPI_F77}" != "notFound" ; then +# F77=${MPI_F77} +# else +# echo "-----" +# echo "Cannot find MPI Fortan 77 compiler." +# echo "Specify a path to all mpi compilers with --with-mpi-compilers=PATH," +# echo "or specify a path to top mpi directory (above bin) with --with-mpi=PATH," +# echo "or specify a fortran 77 compiler using F77=" +# echo "-----" +# AC_MSG_ERROR([MPI C compiler not found.]) +# fi +# fi +# fi + + if test "X$ac_cv_use_fortran90" = "Xyes"; then + + if test -z "${MPI_FC}"; then + + if test -f "${FC}"; then + + MPI_FC=${FC} + + else + MPI_FC_CANDIDATES="" + + if test -n "${FC}"; then + MPI_FC_CANDIDATES=${FC} + elif test -n "${FTN}"; then + MPI_FC_CANDIDATES=${FTN} + elif test -n "${F90}"; then + MPI_FC_CANDIDATES=${F90} + fi + + if test -n "${MPI_FC_CANDIDATES}" && test -f ${MPI_FC_CANDIDATES} ; then + MPI_FC=${MPI_FC_CANDIDATES} + else + if test -z "${MPI_FC_CANDIDATES}"; then + MPI_FC_CANDIDATES="mpif90 mpif77" + fi + + for ac_prog in ${MPI_FC_CANDIDATES} +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_MPI_FC+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MPI_FC in + [\\/]* | ?:[\\/]*) + ac_cv_path_MPI_FC="$MPI_FC" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in PATH = ${MPI_SEEK_PATH} +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_path_MPI_FC="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +MPI_FC=$ac_cv_path_MPI_FC +if test -n "$MPI_FC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MPI_FC" >&5 +$as_echo "$MPI_FC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$MPI_FC" && break +done +test -n "$MPI_FC" || MPI_FC="notFound" + + + if test "${MPI_FC}" != "notFound" ; then + FC=${MPI_FC} + else + echo "-----" + echo "Cannot find MPI Fortran 90 compiler in " ${MPI_SEEK_PATH} + echo "Specify a path to all mpi compilers with --with-mpi-compilers=PATH," + echo "or specify a path to top mpi directory (above bin) with --with-mpi=PATH," + echo "or specify a fortran 90 compiler using FC=" + echo "-----" + as_fn_error $? "MPI C compiler not found." "$LINENO" 5 + fi + fi + fi + fi + fi +fi + +#AC_ARG_WITH(mpi-include, +#[AC_HELP_STRING([--with-mpi-include],[Obsolete. Use --with-mpi-incdir=DIR instead. Do not prefix DIR with '-I'.])], +#[AC_MSG_ERROR([--with-mpi-include is an obsolte option. Use --with-mpi-incdir=DIR instead. Do not prefix DIR with '-I'. For example '--with-mpi-incdir=/usr/lam_path/include'.])] +#) + + +# Check whether --with-mpi-libs was given. +if test "${with_mpi_libs+set}" = set; then : + withval=$with_mpi_libs; + MPI_LIBS=${withval} + { $as_echo "$as_me:${as_lineno-$LINENO}: checking user-defined MPI libraries" >&5 +$as_echo_n "checking user-defined MPI libraries... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${MPI_LIBS}" >&5 +$as_echo "${MPI_LIBS}" >&6; } + + +fi + + + +# Check whether --with-mpi-incdir was given. +if test "${with_mpi_incdir+set}" = set; then : + withval=$with_mpi_incdir; + MPI_INC=${withval} + { $as_echo "$as_me:${as_lineno-$LINENO}: checking user-defined MPI includes" >&5 +$as_echo_n "checking user-defined MPI includes... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${MPI_INC}" >&5 +$as_echo "${MPI_INC}" >&6; } + + +fi + + + +# Check whether --with-mpi-libdir was given. +if test "${with_mpi_libdir+set}" = set; then : + withval=$with_mpi_libdir; + MPI_LIBDIR=${withval} + { $as_echo "$as_me:${as_lineno-$LINENO}: checking user-defined MPI library directory" >&5 +$as_echo_n "checking user-defined MPI library directory... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${MPI_LIBDIR}" >&5 +$as_echo "${MPI_LIBDIR}" >&6; } + + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using MPI" >&5 +$as_echo_n "checking whether we are using MPI... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${HAVE_PKG_MPI}" >&5 +$as_echo "${HAVE_PKG_MPI}" >&6; } + +if test "X${HAVE_PKG_MPI}" = "Xyes"; then + +$as_echo "#define HAVE_MPI /**/" >>confdefs.h + +fi + + + if test "X${HAVE_PKG_MPI}" = "Xyes"; then + HAVE_MPI_TRUE= + HAVE_MPI_FALSE='#' +else + HAVE_MPI_TRUE='#' + HAVE_MPI_FALSE= +fi + + + + +# #np# - can eliminate compiler checks below if your package does not use the +# language corresponding to the check. Please note that if you use +# F77_FUNC to determine Fortran name mangling, you should not remove +# the Fortran compiler check or the check for Fortran flags. Doing +# so will prevent the detection of the proper name mangling in some +# cases. 
+ +# Check whether --enable-mpi-recv-limit was given. +if test "${enable_mpi_recv_limit+set}" = set; then : + enableval=$enable_mpi_recv_limit; { $as_echo "$as_me:${as_lineno-$LINENO}: Building Zoltan to observe a limit of $enable_mpi_recv_limit simultaneously posted MPI receives" >&5 +$as_echo "$as_me: Building Zoltan to observe a limit of $enable_mpi_recv_limit simultaneously posted MPI receives" >&6;} + MPI_RECV_LIMIT_FLAG="-DMPI_RECV_LIMIT=$enable_mpi_recv_limit" + + + +fi + + +# ------------------------------------------------------------------------ +# Checks for programs +# ------------------------------------------------------------------------ + +DEPDIR="${am__leading_dot}deps" + +ac_config_commands="$ac_config_commands depfiles" + + +am_make=${MAKE-make} +cat > confinc << 'END' +am__doit: + @echo this is the am__doit target +.PHONY: am__doit +END +# If we don't find an include directive, just comment out the code. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for style of include used by $am_make" >&5 +$as_echo_n "checking for style of include used by $am_make... " >&6; } +am__include="#" +am__quote= +_am_result=none +# First try GNU make style include. +echo "include confinc" > confmf +# Ignore all kinds of additional output from `make'. +case `$am_make -s -f confmf 2> /dev/null` in #( +*the\ am__doit\ target*) + am__include=include + am__quote= + _am_result=GNU + ;; +esac +# Now try BSD make style include. +if test "$am__include" = "#"; then + echo '.include "confinc"' > confmf + case `$am_make -s -f confmf 2> /dev/null` in #( + *the\ am__doit\ target*) + am__include=.include + am__quote="\"" + _am_result=BSD + ;; + esac +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $_am_result" >&5 +$as_echo "$_am_result" >&6; } +rm -f confinc confmf + +# Check whether --enable-dependency-tracking was given. +if test "${enable_dependency_tracking+set}" = set; then : + enableval=$enable_dependency_tracking; +fi + +if test "x$enable_dependency_tracking" != xno; then + am_depcomp="$ac_aux_dir/depcomp" + AMDEPBACKSLASH='\' + am__nodep='_no' +fi + if test "x$enable_dependency_tracking" != xno; then + AMDEP_TRUE= + AMDEP_FALSE='#' +else + AMDEP_TRUE='#' + AMDEP_FALSE= +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. +set dummy ${ac_tool_prefix}gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. +set dummy gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_CC="gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +else + CC="$ac_cv_prog_CC" +fi + +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. +set dummy ${ac_tool_prefix}cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CC="${ac_tool_prefix}cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + fi +fi +if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else + ac_prog_rejected=no +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +if test $ac_prog_rejected = yes; then + # We found a bogon in the path, so make sure we never use it. + set dummy $ac_cv_prog_CC + shift + if test $# != 0; then + # We chose a different compiler from the bogus one. + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. + shift + ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi +fi +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + for ac_prog in cl.exe + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cl.exe +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_CC="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CC" && break +done + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + +fi + + +test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "no acceptable C compiler found in \$PATH +See \`config.log' for more details" "$LINENO" 5; } + +# Provide some information about the compiler. +$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 +$as_echo_n "checking whether the C compiler works... " >&6; } +ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` + +# The possible output files: +ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" + +ac_rmfiles= +for ac_file in $ac_files +do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + * ) ac_rmfiles="$ac_rmfiles $ac_file";; + esac +done +rm -f $ac_rmfiles + +if { { ac_try="$ac_link_default" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link_default") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. +# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' +# in a Makefile. 
We should not override ac_cv_exeext if it was cached, +# so that the user can short-circuit this test for compilers unknown to +# Autoconf. +for ac_file in $ac_files '' +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + then :; else + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + fi + # We set ac_cv_exeext here because the later test for it is not + # safe: cross compilers may not add the suffix if given an `-o' + # argument, so we may need to know it at that point already. + # Even if this section looks crufty: it has the advantage of + # actually working. + break;; + * ) + break;; + esac +done +test "$ac_cv_exeext" = no && ac_cv_exeext= + +else + ac_file='' +fi +if test -z "$ac_file"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +$as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "C compiler cannot create executables +See \`config.log' for more details" "$LINENO" 5; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 +$as_echo_n "checking for C compiler default output file name... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 +$as_echo "$ac_file" >&6; } +ac_exeext=$ac_cv_exeext + +rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 +$as_echo_n "checking for suffix of executables... " >&6; } +if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # If both `conftest.exe' and `conftest' are `present' (well, observable) +# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will +# work properly (i.e., refer to `conftest.exe'), while it won't with +# `rm'. +for ac_file in conftest.exe conftest conftest.*; do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + break;; + * ) break;; + esac +done +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest conftest$ac_cv_exeext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 +$as_echo "$ac_cv_exeext" >&6; } + +rm -f conftest.$ac_ext +EXEEXT=$ac_cv_exeext +ac_exeext=$EXEEXT +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/
+#include <stdio.h>
+int
+main ()
+{
+FILE *f = fopen ("conftest.out", "w");
+ return ferror (f) || fclose (f) != 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+ac_clean_files="$ac_clean_files conftest.out"
+# Check that the compiler produces executables we can run. If not, either
+# the compiler is broken, or we cross compile.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
+$as_echo_n "checking whether we are cross compiling... " >&6; }
+if test "$cross_compiling" != yes; then
+ { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+ if { ac_try='./conftest$ac_cv_exeext'
+ { { case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; }; then
+ cross_compiling=no
+ else
+ if test "$cross_compiling" = maybe; then
+ cross_compiling=yes
+ else
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details" "$LINENO" 5; }
+ fi
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
+$as_echo "$cross_compiling" >&6; }
+
+rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
+ac_clean_files=$ac_clean_files_save
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
+$as_echo_n "checking for suffix of object files... " >&6; }
+if ${ac_cv_objext+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.o conftest.obj
+if { { ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compile") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then :
+ for ac_file in conftest.o conftest.obj conftest.*; do
+ test -f "$ac_file" || continue;
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;;
+ *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
+ break;;
+ esac
+done
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of object files: cannot compile
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+rm -f conftest.$ac_cv_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5
+$as_echo "$ac_cv_objext" >&6; }
+OBJEXT=$ac_cv_objext
+ac_objext=$OBJEXT
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
+$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
+if ${ac_cv_c_compiler_gnu+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_compiler_gnu=yes
+else
+ ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_c_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
+$as_echo "$ac_cv_c_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+ GCC=yes
+else
+ GCC=
+fi
+ac_test_CFLAGS=${CFLAGS+set}
+ac_save_CFLAGS=$CFLAGS
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
+$as_echo_n "checking whether $CC accepts -g... " >&6; }
+if ${ac_cv_prog_cc_g+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_save_c_werror_flag=$ac_c_werror_flag
+ ac_c_werror_flag=yes
+ ac_cv_prog_cc_g=no
+ CFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_g=yes
+else
+ CFLAGS=""
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+ ac_c_werror_flag=$ac_save_c_werror_flag
+ CFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_prog_cc_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
+$as_echo "$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+ CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+ if test "$GCC" = yes; then
+ CFLAGS="-g -O2"
+ else
+ CFLAGS="-g"
+ fi
+else
+ if test "$GCC" = yes; then
+ CFLAGS="-O2"
+ else
+ CFLAGS=
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
+$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
+if ${ac_cv_prog_cc_c89+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+ char **p;
+ int i;
+{
+ return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+ char *s;
+ va_list v;
+ va_start (v,p);
+ s = g (p, va_arg (v,int));
+ va_end (v);
+ return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has
+ function prototypes and stuff, but not '\xHH' hex character constants.
+ These don't provoke an error unfortunately, instead are silently treated
+ as 'x'. The following induces an error, until -std is added to get
+ proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an
+ array size at least. It's necessary to write '\x00'==0 to get something
+ that's true only with -std. */
+int osf4_cc_array ['\x00' == 0 ?
1 : -1]; + +/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters + inside strings and character constants. */ +#define FOO(x) 'x' +int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ + -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_c89=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c89" in + x) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; + xno) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c89" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; +esac +if test "x$ac_cv_prog_cc_c89" != xno; then : + +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +depcc="$CC" am_compiler_list= + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +$as_echo_n "checking dependency style of $depcc... " >&6; } +if ${am_cv_CC_dependencies_compiler_type+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named `D' -- because `-MD' means `put the output + # in D'. + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_CC_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + am__universal=false + case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. 
+ : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with + # Solaris 8's {/usr,}/bin/sh. + touch sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with `-c' and `-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle `-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. + test "$am__universal" = false || continue + ;; + nosideeffect) + # after this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok `-c -o', but also, the minuso test has + # not run yet. These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CC_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. + rm -rf conftest.dir +else + am_cv_CC_dependencies_compiler_type=none +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 +$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } +CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type + + if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then + am__fastdepCC_TRUE= + am__fastdepCC_FALSE='#' +else + am__fastdepCC_TRUE='#' + am__fastdepCC_FALSE= +fi + + +if test "x$CC" != xcc; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC and cc understand -c and -o together" >&5 +$as_echo_n "checking whether $CC and cc understand -c and -o together... " >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether cc understands -c and -o together" >&5 +$as_echo_n "checking whether cc understands -c and -o together... " >&6; } +fi +set dummy $CC; ac_cc=`$as_echo "$2" | + sed 's/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/'` +if eval \${ac_cv_prog_cc_${ac_cc}_c_o+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +# Make sure it works both with $CC and with simple cc. +# We do the test twice because some compilers refuse to overwrite an +# existing .o file with -o, though they will create one. +ac_try='$CC -c conftest.$ac_ext -o conftest2.$ac_objext >&5' +rm -f conftest2.* +if { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && + test -f conftest2.$ac_objext && { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; +then + eval ac_cv_prog_cc_${ac_cc}_c_o=yes + if test "x$CC" != xcc; then + # Test first that cc exists at all. + if { ac_try='cc -c conftest.$ac_ext >&5' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then + ac_try='cc -c conftest.$ac_ext -o conftest2.$ac_objext >&5' + rm -f conftest2.* + if { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && + test -f conftest2.$ac_objext && { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; + then + # cc works too. + : + else + # cc exists but doesn't like -o. + eval ac_cv_prog_cc_${ac_cc}_c_o=no + fi + fi + fi +else + eval ac_cv_prog_cc_${ac_cc}_c_o=no +fi +rm -f core conftest* + +fi +if eval test \$ac_cv_prog_cc_${ac_cc}_c_o = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +$as_echo "#define NO_MINUS_C_MINUS_O 1" >>confdefs.h + +fi + +# FIXME: we rely on the cache variable name because +# there is no other way. +set dummy $CC +am_cc=`echo $2 | sed 's/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/'` +eval am_t=\$ac_cv_prog_cc_${am_cc}_c_o +if test "$am_t" != yes; then + # Losing compiler, so override with the script. + # FIXME: It is wrong to rewrite CC. + # But if we don't then we get into trouble of one sort or another. + # A longer-term fix would be to have automake use am__CC in this case, + # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" + CC="$am_aux_dir/compile $CC" +fi + + +#Conditional only? 
+#if test "X$ac_cv_use_zoltan_cppdriver" = "Xyes"; then +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +if test -z "$CXX"; then + if test -n "$CCC"; then + CXX=$CCC + else + if test -n "$ac_tool_prefix"; then + for ac_prog in CC g++ c++ cxx + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CXX"; then + ac_cv_prog_CXX="$CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CXX=$ac_cv_prog_CXX +if test -n "$CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 +$as_echo "$CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CXX" && break + done +fi +if test -z "$CXX"; then + ac_ct_CXX=$CXX + for ac_prog in CC g++ c++ cxx +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CXX"; then + ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_CXX="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CXX=$ac_cv_prog_ac_ct_CXX +if test -n "$ac_ct_CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 +$as_echo "$ac_ct_CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CXX" && break +done + + if test "x$ac_ct_CXX" = x; then + CXX="g++" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CXX=$ac_ct_CXX + fi +fi + + fi +fi +# Provide some information about the compiler. 
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 +$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } +if ${ac_cv_cxx_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_cxx_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 +$as_echo "$ac_cv_cxx_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GXX=yes +else + GXX= +fi +ac_test_CXXFLAGS=${CXXFLAGS+set} +ac_save_CXXFLAGS=$CXXFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 +$as_echo_n "checking whether $CXX accepts -g... " >&6; } +if ${ac_cv_prog_cxx_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_cxx_werror_flag=$ac_cxx_werror_flag + ac_cxx_werror_flag=yes + ac_cv_prog_cxx_g=no + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +else + CXXFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +else + ac_cxx_werror_flag=$ac_save_cxx_werror_flag + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cxx_werror_flag=$ac_save_cxx_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 +$as_echo "$ac_cv_prog_cxx_g" >&6; } +if test "$ac_test_CXXFLAGS" = set; then + CXXFLAGS=$ac_save_CXXFLAGS +elif test $ac_cv_prog_cxx_g = yes; then + if test "$GXX" = yes; then + CXXFLAGS="-g -O2" + else + CXXFLAGS="-g" + fi +else + if test "$GXX" = yes; then + CXXFLAGS="-O2" + else + CXXFLAGS= + fi +fi +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +depcc="$CXX" am_compiler_list= + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +$as_echo_n "checking dependency style of $depcc... " >&6; } +if ${am_cv_CXX_dependencies_compiler_type+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named `D' -- because `-MD' means `put the output + # in D'. + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_CXX_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + am__universal=false + case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with + # Solaris 8's {/usr,}/bin/sh. + touch sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with `-c' and `-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle `-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. 
+ test "$am__universal" = false || continue + ;; + nosideeffect) + # after this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok `-c -o', but also, the minuso test has + # not run yet. These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CXX_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. + rm -rf conftest.dir +else + am_cv_CXX_dependencies_compiler_type=none +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5 +$as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; } +CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type + + if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then + am__fastdepCXX_TRUE= + am__fastdepCXX_FALSE='#' +else + am__fastdepCXX_TRUE='#' + am__fastdepCXX_FALSE= +fi + + +#fi + +if test "X$ac_cv_use_fortran90" = "Xyes"; then +ac_ext=${ac_fc_srcext-f} +ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' +ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_fc_compiler_gnu +if test -n "$ac_tool_prefix"; then + for ac_prog in FC gfortran f90 xlf90 f95 + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_FC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$FC"; then + ac_cv_prog_FC="$FC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_FC="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +FC=$ac_cv_prog_FC +if test -n "$FC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $FC" >&5 +$as_echo "$FC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$FC" && break + done +fi +if test -z "$FC"; then + ac_ct_FC=$FC + for ac_prog in FC gfortran f90 xlf90 f95 +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_FC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_FC"; then + ac_cv_prog_ac_ct_FC="$ac_ct_FC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_FC="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_FC=$ac_cv_prog_ac_ct_FC +if test -n "$ac_ct_FC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_FC" >&5 +$as_echo "$ac_ct_FC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_FC" && break +done + + if test "x$ac_ct_FC" = x; then + FC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + FC=$ac_ct_FC + fi +fi + + +# Provide some information about the compiler. +$as_echo "$as_me:${as_lineno-$LINENO}: checking for Fortran compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done +rm -f a.out + +# If we don't use `.F' as extension, the preprocessor is not run on the +# input file. (Note that this only needs to work for GNU compilers.) +ac_save_ext=$ac_ext +ac_ext=F +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU Fortran compiler" >&5 +$as_echo_n "checking whether we are using the GNU Fortran compiler... 
" >&6; } +if ${ac_cv_fc_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat > conftest.$ac_ext <<_ACEOF + program main +#ifndef __GNUC__ + choke me +#endif + + end +_ACEOF +if ac_fn_fc_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_fc_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_compiler_gnu" >&5 +$as_echo "$ac_cv_fc_compiler_gnu" >&6; } +ac_ext=$ac_save_ext +ac_test_FCFLAGS=${FCFLAGS+set} +ac_save_FCFLAGS=$FCFLAGS +FCFLAGS= +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $FC accepts -g" >&5 +$as_echo_n "checking whether $FC accepts -g... " >&6; } +if ${ac_cv_prog_fc_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + FCFLAGS=-g +cat > conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF +if ac_fn_fc_try_compile "$LINENO"; then : + ac_cv_prog_fc_g=yes +else + ac_cv_prog_fc_g=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_fc_g" >&5 +$as_echo "$ac_cv_prog_fc_g" >&6; } +if test "$ac_test_FCFLAGS" = set; then + FCFLAGS=$ac_save_FCFLAGS +elif test $ac_cv_prog_fc_g = yes; then + if test "x$ac_cv_fc_compiler_gnu" = xyes; then + FCFLAGS="-g -O2" + else + FCFLAGS="-g" + fi +else + if test "x$ac_cv_fc_compiler_gnu" = xyes; then + FCFLAGS="-O2" + else + FCFLAGS= + fi +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +fi + +if test "X$ac_cv_use_fortran" = "Xyes"; then +ac_ext=${ac_fc_srcext-f} +ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' +ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_fc_compiler_gnu +if test -n "$ac_tool_prefix"; then + for ac_prog in FC gfortran f90 xlf90 f95 + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_FC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$FC"; then + ac_cv_prog_FC="$FC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_FC="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +FC=$ac_cv_prog_FC +if test -n "$FC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $FC" >&5 +$as_echo "$FC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$FC" && break + done +fi +if test -z "$FC"; then + ac_ct_FC=$FC + for ac_prog in FC gfortran f90 xlf90 f95 +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_prog_ac_ct_FC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_FC"; then + ac_cv_prog_ac_ct_FC="$ac_ct_FC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_FC="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_FC=$ac_cv_prog_ac_ct_FC +if test -n "$ac_ct_FC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_FC" >&5 +$as_echo "$ac_ct_FC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_FC" && break +done + + if test "x$ac_ct_FC" = x; then + FC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + FC=$ac_ct_FC + fi +fi + + +# Provide some information about the compiler. +$as_echo "$as_me:${as_lineno-$LINENO}: checking for Fortran compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done +rm -f a.out + +# If we don't use `.F' as extension, the preprocessor is not run on the +# input file. (Note that this only needs to work for GNU compilers.) +ac_save_ext=$ac_ext +ac_ext=F +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU Fortran compiler" >&5 +$as_echo_n "checking whether we are using the GNU Fortran compiler... " >&6; } +if ${ac_cv_fc_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat > conftest.$ac_ext <<_ACEOF + program main +#ifndef __GNUC__ + choke me +#endif + + end +_ACEOF +if ac_fn_fc_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_fc_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_compiler_gnu" >&5 +$as_echo "$ac_cv_fc_compiler_gnu" >&6; } +ac_ext=$ac_save_ext +ac_test_FCFLAGS=${FCFLAGS+set} +ac_save_FCFLAGS=$FCFLAGS +FCFLAGS= +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $FC accepts -g" >&5 +$as_echo_n "checking whether $FC accepts -g... 
" >&6; } +if ${ac_cv_prog_fc_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + FCFLAGS=-g +cat > conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF +if ac_fn_fc_try_compile "$LINENO"; then : + ac_cv_prog_fc_g=yes +else + ac_cv_prog_fc_g=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_fc_g" >&5 +$as_echo "$ac_cv_prog_fc_g" >&6; } +if test "$ac_test_FCFLAGS" = set; then + FCFLAGS=$ac_save_FCFLAGS +elif test $ac_cv_prog_fc_g = yes; then + if test "x$ac_cv_fc_compiler_gnu" = xyes; then + FCFLAGS="-g -O2" + else + FCFLAGS="-g" + fi +else + if test "x$ac_cv_fc_compiler_gnu" = xyes; then + FCFLAGS="-O2" + else + FCFLAGS= + fi +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +fi + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. +set dummy ${ac_tool_prefix}ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$RANLIB"; then + ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +RANLIB=$ac_cv_prog_RANLIB +if test -n "$RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 +$as_echo "$RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_RANLIB"; then + ac_ct_RANLIB=$RANLIB + # Extract the first word of "ranlib", so it can be a program name with args. +set dummy ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_RANLIB"; then + ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_RANLIB="ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB +if test -n "$ac_ct_RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 +$as_echo "$ac_ct_RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_RANLIB" = x; then + RANLIB=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + RANLIB=$ac_ct_RANLIB + fi +else + RANLIB="$ac_cv_prog_RANLIB" +fi + + +# Check if --with-flags present, prepend any specs to FLAGS + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether additional CCFLAGS flags should be added" >&5 +$as_echo_n "checking whether additional CCFLAGS flags should be added... " >&6; } + +# Check whether --with-ccflags was given. +if test "${with_ccflags+set}" = set; then : + withval=$with_ccflags; +CCFLAGS="${withval} ${CCFLAGS}" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CCFLAGS = ${CCFLAGS}" >&5 +$as_echo "CCFLAGS = ${CCFLAGS}" >&6; } + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi + + +#if test "X$ac_cv_use_zoltan_cppdriver" = "Xyes"; then + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether additional CXXFLAGS flags should be added" >&5 +$as_echo_n "checking whether additional CXXFLAGS flags should be added... " >&6; } + +# Check whether --with-cxxflags was given. +if test "${with_cxxflags+set}" = set; then : + withval=$with_cxxflags; +CXXFLAGS="${withval} ${CXXFLAGS}" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CXXFLAGS = ${CXXFLAGS}" >&5 +$as_echo "CXXFLAGS = ${CXXFLAGS}" >&6; } + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi + + +CXXFLAGS="-DMPICH_IGNORE_CXX_SEEK ${CXXFLAGS}" +#fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether additional CFLAGS flags should be added" >&5 +$as_echo_n "checking whether additional CFLAGS flags should be added... " >&6; } + +# Check whether --with-cflags was given. +if test "${with_cflags+set}" = set; then : + withval=$with_cflags; +CFLAGS="${withval} ${CFLAGS}" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CFLAGS = ${CFLAGS}" >&5 +$as_echo "CFLAGS = ${CFLAGS}" >&6; } + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether additional FCFLAGS flags should be added" >&5 +$as_echo_n "checking whether additional FCFLAGS flags should be added... " >&6; } + +# Check whether --with-fcflags was given. 
+if test "${with_fcflags+set}" = set; then : + withval=$with_fcflags; +FCFLAGS="${withval} ${FCFLAGS}" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: FCFLAGS = ${FCFLAGS}" >&5 +$as_echo "FCFLAGS = ${FCFLAGS}" >&6; } + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi + + +#TAC_ARG_WITH_FLAGS(fflags, FFLAGS) + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether additional libraries are needed" >&5 +$as_echo_n "checking whether additional libraries are needed... " >&6; } + +# Check whether --with-libs was given. +if test "${with_libs+set}" = set; then : + withval=$with_libs; +LIBS="${withval} ${LIBS}" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBS = ${LIBS}" >&5 +$as_echo "LIBS = ${LIBS}" >&6; } + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether additional LDFLAGS flags should be added" >&5 +$as_echo_n "checking whether additional LDFLAGS flags should be added... " >&6; } + +# Check whether --with-ldflags was given. +if test "${with_ldflags+set}" = set; then : + withval=$with_ldflags; +LDFLAGS="${withval} ${LDFLAGS}" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LDFLAGS = ${LDFLAGS}" >&5 +$as_echo "LDFLAGS = ${LDFLAGS}" >&6; } + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi + + + +# ------------------------------------------------------------------------ +# Alternate archiver +# ------------------------------------------------------------------------ + + + +# Check whether --with-ar was given. +if test "${with_ar+set}" = set; then : + withval=$with_ar; +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking user-defined archiver" >&5 +$as_echo_n "checking user-defined archiver... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${withval}" >&5 +$as_echo "${withval}" >&6; } +USE_ALTERNATE_AR=yes +ALTERNATE_AR="${withval}" + + +fi + + +if test -n "${SPECIAL_AR}" && test "X${USE_ALTERNATE_AR}" != "Xyes"; +then + USE_ALTERNATE_AR=yes + ALTERNATE_AR="${SPECIAL_AR}" +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for special archiver command" >&5 +$as_echo_n "checking for special archiver command... " >&6; } +if test "X${USE_ALTERNATE_AR}" = "Xyes"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${ALTERNATE_AR}" >&5 +$as_echo "${ALTERNATE_AR}" >&6; } + if true; then + USE_ALTERNATE_AR_TRUE= + USE_ALTERNATE_AR_FALSE='#' +else + USE_ALTERNATE_AR_TRUE='#' + USE_ALTERNATE_AR_FALSE= +fi + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 +$as_echo "none" >&6; } + if false; then + USE_ALTERNATE_AR_TRUE= + USE_ALTERNATE_AR_FALSE='#' +else + USE_ALTERNATE_AR_TRUE='#' + USE_ALTERNATE_AR_FALSE= +fi + +fi + + + +# ------------------------------------------------------------------------ +# MPI link check +# ------------------------------------------------------------------------ + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5 +$as_echo_n "checking how to run the C++ preprocessor... 
" >&6; } +if test -z "$CXXCPP"; then + if ${ac_cv_prog_CXXCPP+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Double quotes because CXXCPP needs to be expanded + for CXXCPP in "$CXX -E" "/lib/cpp" + do + ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + break +fi + + done + ac_cv_prog_CXXCPP=$CXXCPP + +fi + CXXCPP=$ac_cv_prog_CXXCPP +else + ac_cv_prog_CXXCPP=$CXXCPP +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5 +$as_echo "$CXXCPP" >&6; } +ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? 
"C++ preprocessor \"$CXXCPP\" fails sanity check +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + + + +if test "X${HAVE_PKG_MPI}" = "Xyes"; then + + if test -n "${MPI_DIR}" && test -z "${MPI_INC}"; then + MPI_INC="${MPI_DIR}/include" + fi + + if test -n "${MPI_INC}"; then + CPPFLAGS="${CPPFLAGS} -I${MPI_INC}" + fi + + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for mpi.h" >&5 +$as_echo_n "checking for mpi.h... " >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include "mpi.h" +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + echo "-----" + echo "Cannot link simple MPI program." + echo "Try --with-mpi-compilers to specify MPI compilers." + echo "Or try --with-mpi-libs, --with-mpi-incdir, --with-mpi-libdir" + echo "to specify all the specific MPI compile options." + echo "-----" + as_fn_error $? "MPI cannot link" "$LINENO" 5 + +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + if test -n "${MPI_DIR}" && test -z "${MPI_LIBDIR}"; then + MPI_LIBDIR="${MPI_DIR}/lib" + fi + + if test -n "${MPI_LIBDIR}"; then + LDFLAGS="${LDFLAGS} -L${MPI_LIBDIR}" + fi + + if test -z "${MPI_LIBS}" && test -n "${MPI_LIBDIR}"; then + MPI_LIBS="-lmpi" + fi + + if test -n "${MPI_LIBS}"; then + LIBS="${MPI_LIBS} ${LIBS}" + fi + +# AC_LANG_CPLUSPLUS +# AC_MSG_CHECKING(whether MPI will link using C++ compiler) +# AC_TRY_LINK([#include ], +# [int c; char** v; MPI_Init(&c,&v);], +# [AC_MSG_RESULT(yes)], +# [AC_MSG_RESULT(no) +# echo "-----" +# echo "Cannot link simple MPI program." +# echo "Or try --with-mpi-libs, --with-mpi-incdir, --with-mpi-libdir" +# echo "to specify all the specific MPI compile options." +# echo "-----" +# AC_MSG_ERROR(MPI cannot link)] +# ) + +fi + + +# ------------------------------------------------------------------------ +# Checks for Makefile.export related systems +# ------------------------------------------------------------------------ +# Add this later + +# Check whether --enable-export-makefiles was given. +if test "${enable_export_makefiles+set}" = set; then : + enableval=$enable_export_makefiles; ac_cv_use_export_makefiles=$enableval +else + ac_cv_use_export_makefiles=yes +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build export makefiles" >&5 +$as_echo_n "checking whether to build export makefiles... 
" >&6; } + +if test "X$ac_cv_use_export_makefiles" != "Xno"; then + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +$as_echo "#define HAVE_EXPORT_MAKEFILES /**/" >>confdefs.h + + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi + + if test X${ac_cv_use_export_makefiles} = Xyes; then + USING_EXPORT_MAKEFILES_TRUE= + USING_EXPORT_MAKEFILES_FALSE='#' +else + USING_EXPORT_MAKEFILES_TRUE='#' + USING_EXPORT_MAKEFILES_FALSE= +fi + + +# Check for perl to run scripts (Required dependency) + + + +# Check whether --with-perl was given. +if test "${with_perl+set}" = set; then : + withval=$with_perl; +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for user supplied perl executable" >&5 +$as_echo_n "checking for user supplied perl executable... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${withval}" >&5 +$as_echo "${withval}" >&6; } +USER_SPECIFIED_PERL=yes +PERL_EXE="${withval}" + +else + +USER_SPECIFIED_PERL=no + +fi + + +if test "X${USER_SPECIFIED_PERL}" = "Xyes"; then + as_ac_File=`$as_echo "ac_cv_file_${PERL_EXE}" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${PERL_EXE}" >&5 +$as_echo_n "checking for ${PERL_EXE}... " >&6; } +if eval \${$as_ac_File+:} false; then : + $as_echo_n "(cached) " >&6 +else + test "$cross_compiling" = yes && + as_fn_error $? "cannot check for file existence when cross compiling" "$LINENO" 5 +if test -r "${PERL_EXE}"; then + eval "$as_ac_File=yes" +else + eval "$as_ac_File=no" +fi +fi +eval ac_res=\$$as_ac_File + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_ac_File"\" = x"yes"; then : + HAVE_PERL=yes +else + HAVE_PERL=no +fi + + PERL_EXE=${PERL_EXE} + +else + # Extract the first word of "perl", so it can be a program name with args. +set dummy perl; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_HAVE_PERL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$HAVE_PERL"; then + ac_cv_prog_HAVE_PERL="$HAVE_PERL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_HAVE_PERL="yes" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_prog_HAVE_PERL" && ac_cv_prog_HAVE_PERL="no" +fi +fi +HAVE_PERL=$ac_cv_prog_HAVE_PERL +if test -n "$HAVE_PERL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $HAVE_PERL" >&5 +$as_echo "$HAVE_PERL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + PERL_EXE=perl + +fi + if test X${HAVE_PERL} = Xyes; then + USING_PERL_TRUE= + USING_PERL_FALSE='#' +else + USING_PERL_TRUE='#' + USING_PERL_FALSE= +fi + + + +if test "X$HAVE_PERL" != "Xyes" && + test "X$ac_cv_use_export_makefiles" != "Xno"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + as_fn_error $? "Failed to find the perl executable. The flag --enable-export-makefiles requires perl to be either in your path or explicitly defined by the flag --with-perl=. 
If you do not require the export makefiles to be installed via 'make install', you can disable the export makefiles with --disable-export-makefiles." "$LINENO" 5 +fi + +# Check for using gnumake to clean up link lines via +# gnumake's "shell" command. Optional dependency. + + + + +# Check whether --with-gnumake was given. +if test "${with_gnumake+set}" = set; then : + withval=$with_gnumake; ac_cv_use_gnumake=$withval +else + ac_cv_use_gnumake=no +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether gnumake specific code should be enabled" >&5 +$as_echo_n "checking whether gnumake specific code should be enabled... " >&6; } + +if test "X$ac_cv_use_gnumake" != "Xno"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +$as_echo "#define HAVE_GNUMAKE /**/" >>confdefs.h + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + if test "X$ac_cv_use_gnumake" = "Xyes"; then + USING_GNUMAKE_TRUE= + USING_GNUMAKE_FALSE='#' +else + USING_GNUMAKE_TRUE='#' + USING_GNUMAKE_FALSE= +fi + + + +if test "X$HAVE_PERL" != "Xyes" && + test "X$ac_cv_use_gnumake" != "Xno"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + as_fn_error $? "The flag --with-gnumake requires perl to be in your path. The perl executable can alternatively be explicitly defined by the flag --with-perl=." "$LINENO" 5 +fi + + + +# ------------------------------------------------------------------------ +# Checks for special package flags +# ------------------------------------------------------------------------ + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking data type for ZOLTAN_ID_TYPE" >&5 +$as_echo_n "checking data type for ZOLTAN_ID_TYPE... " >&6; } +zoltan_id_type="unset" + +# Check whether --with-id-type was given. +if test "${with_id_type+set}" = set; then : + withval=$with_id_type; +if test "X$withval" == "Xuint" ; then + +$as_echo "#define UNSIGNED_INT_GLOBAL_IDS 1" >>confdefs.h + + zoltan_id_type="unsigned int" +else + if test "X$withval" == "Xulong" ; then + +$as_echo "#define UNSIGNED_LONG_GLOBAL_IDS 1" >>confdefs.h + + zoltan_id_type="unsigned long" + else + if test "X$withval" == "Xullong" ; then + +$as_echo "#define UNSIGNED_LONG_LONG_GLOBAL_IDS 1" >>confdefs.h + + zoltan_id_type="unsigned long long" + else + as_fn_error $? "Valid global ID types for Zoltan are uint, ulong, and ullong" "$LINENO" 5 + fi + fi +fi + +else + + +$as_echo "#define UNSIGNED_INT_GLOBAL_IDS 1" >>confdefs.h + +zoltan_id_type="unsigned int" + + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: typedef $zoltan_id_type ZOLTAN_ID_TYPE" >&5 +$as_echo "typedef $zoltan_id_type ZOLTAN_ID_TYPE" >&6; } + + + + +# Check whether --enable-gzip was given. +if test "${enable_gzip+set}" = set; then : + enableval=$enable_gzip; ac_cv_use_gzip=$enableval +else + ac_cv_use_gzip=no +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use gzip" >&5 +$as_echo_n "checking whether to use gzip... 
" >&6; } + +if test "X$ac_cv_use_gzip" != "Xno"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +$as_echo "#define HAVE_GZIP 1" >>confdefs.h + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + +if test "X$ac_cv_use_gzip" != "Xno"; then + LIBS="-lz ${LIBS}" +fi + if test "X$ac_cv_use_gzip" != "Xno"; then + BUILD_GZIP_TRUE= + BUILD_GZIP_FALSE='#' +else + BUILD_GZIP_TRUE='#' + BUILD_GZIP_FALSE= +fi + + + + +# Check whether --with-parmetis was given. +if test "${with_parmetis+set}" = set; then : + withval=$with_parmetis; ac_cv_use_parmetis=$withval +else + ac_cv_use_parmetis=no +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use parmetis" >&5 +$as_echo_n "checking whether to use parmetis... " >&6; } + +if test "X$ac_cv_use_parmetis" != "Xno"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +$as_echo "#define HAVE_PARMETIS /**/" >>confdefs.h + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "X$ac_cv_use_parmetis" != "Xno"; then + BUILD_PARMETIS_TRUE= + BUILD_PARMETIS_FALSE='#' +else + BUILD_PARMETIS_TRUE='#' + BUILD_PARMETIS_FALSE= +fi + + + +# Check whether --with-parmetis-libdir was given. +if test "${with_parmetis_libdir+set}" = set; then : + withval=$with_parmetis_libdir; tac_with_parmetis_libdir=$withval +else + tac_with_parmetis_libdir=no +fi + + +if test "X$tac_with_parmetis_libdir" != "Xno"; then + LIBS="-L${tac_with_parmetis_libdir} -lparmetis -lmetis ${LIBS}" +fi + + + +# Check whether --with-parmetis-incdir was given. +if test "${with_parmetis_incdir+set}" = set; then : + withval=$with_parmetis_incdir; tac_with_parmetis_incdir=$withval +else + tac_with_parmetis_incdir=no +fi + + +# It was necessary to move the parmetis include directories before the scotch +# include directories. +#if test "X$tac_with_parmetis_incdir" != "Xno"; then +# CPPFLAGS="-I${tac_with_parmetis_incdir} ${CPPFLAGS}" +#fi + + + + +# Check whether --with-scotch was given. +if test "${with_scotch+set}" = set; then : + withval=$with_scotch; ac_cv_use_scotch=$withval +else + ac_cv_use_scotch=no +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use scotch" >&5 +$as_echo_n "checking whether to use scotch... " >&6; } + +if test "X$ac_cv_use_scotch" != "Xno"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +$as_echo "#define HAVE_SCOTCH /**/" >>confdefs.h + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "X$ac_cv_use_scotch" != "Xno"; then + BUILD_SCOTCH_TRUE= + BUILD_SCOTCH_FALSE='#' +else + BUILD_SCOTCH_TRUE='#' + BUILD_SCOTCH_FALSE= +fi + + + +# Check whether --with-scotch-libdir was given. +if test "${with_scotch_libdir+set}" = set; then : + withval=$with_scotch_libdir; tac_with_scotch_libdir=$withval +else + tac_with_scotch_libdir=no +fi + + +if test "X$tac_with_scotch_libdir" != "Xno"; then + LIBS="-L${tac_with_scotch_libdir} -lptscotch -lptscotcherr -lptscotcherrexit -lscotch -lscotcherr -lscotcherrexit ${LIBS}" +fi + + +# Check whether --with-scotch-incdir was given. 
+if test "${with_scotch_incdir+set}" = set; then : + withval=$with_scotch_incdir; tac_with_scotch_incdir=$withval +else + tac_with_scotch_incdir=no +fi + + +if test "X$tac_with_scotch_incdir" != "Xno"; then + CPPFLAGS="-I${tac_with_scotch_incdir} ${CPPFLAGS}" +fi +# The parmetis include directories need to be before the Scotch include +# directories. +if test "X$tac_with_parmetis_incdir" != "Xno"; then + CPPFLAGS="-I${tac_with_parmetis_incdir} ${CPPFLAGS}" +fi + + + +# Check whether --with-patoh was given. +if test "${with_patoh+set}" = set; then : + withval=$with_patoh; ac_cv_use_patoh=$withval +else + ac_cv_use_patoh=no +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use patoh" >&5 +$as_echo_n "checking whether to use patoh... " >&6; } + +if test "X$ac_cv_use_patoh" != "Xno"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +$as_echo "#define HAVE_PATOH /**/" >>confdefs.h + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + +# Check whether --with-patoh-libdir was given. +if test "${with_patoh_libdir+set}" = set; then : + withval=$with_patoh_libdir; tac_with_patoh_libdir=$withval +else + tac_with_patoh_libdir=no +fi + + +if test "X$tac_with_patoh_libdir" != "Xno"; then + LIBS="-L${tac_with_patoh_libdir} -lpatoh ${LIBS}" +fi + + +# Check whether --with-patoh-incdir was given. +if test "${with_patoh_incdir+set}" = set; then : + withval=$with_patoh_incdir; tac_with_patoh_incdir=$withval +else + tac_with_patoh_incdir=no +fi + + +if test "X$tac_with_patoh_incdir" != "Xno"; then + CPPFLAGS="-I${tac_with_patoh_incdir} ${CPPFLAGS}" +fi + +#TAC_ARG_WITH_PACKAGE(drum, [Enable Drum support. Library name as well as library and include paths must be specified using LDFLAGS and CPPFLAGS.], DRUM, no) +#TAC_ARG_WITH_3PL_SUB(drum, libdir, [Specify where the Drum library is located. Ex. /path/to/library]) +#if test "X$tac_with_drum_libdir" != "Xno"; then +# LIBS="-L${tac_with_drum_libdir} ${LIBS}" +#fi +#TAC_ARG_WITH_3PL_SUB(drum, incdir, [Specify where the Drum header files are located. Ex. /path/to/headers]) +#if test "X$tac_with_drum_incdir" != "Xno"; then +# CPPFLAGS="-I${tac_with_drum_incdir} ${CPPFLAGS}" +#fi + + + +# Check whether --with-nemesis_exodus was given. +if test "${with_nemesis_exodus+set}" = set; then : + withval=$with_nemesis_exodus; ac_cv_use_nemesis_exodus=$withval +else + ac_cv_use_nemesis_exodus=no +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use nemesis_exodus" >&5 +$as_echo_n "checking whether to use nemesis_exodus... " >&6; } + +if test "X$ac_cv_use_nemesis_exodus" != "Xno"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +$as_echo "#define HAVE_NEMESIS_EXODUS /**/" >>confdefs.h + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + +#I am not sure if we can do this. We may not be able to get things in the right +#order. JW. +if test "X$ac_cv_use_nemesis_exodus" != "Xno"; then + LIBS="-lnemIc -lexoIIv2c -lnetcdf ${LIBS}" +fi + +LIBS="${LIBS} -lm" + +#TAC_ARG_WITH_PACKAGE(parkway, [Enable Parkway support.], PARKWAY, no) +#TAC_ARG_WITH_3PL_SUB(parkway, libdir, [Specify where the Parkway library is located. Ex. /path/to/library]) +#if test "X$tac_with_parkway_libdir" != "Xno"; then +# LIBS="-L${tac_with_parkway_libdir} -lparkway -lpmpich++ -lstdc++ ${LIBS}" +#fi +#TAC_ARG_WITH_3PL_SUB(parkway, incdir, [Specify where the Parkway header files are located. Ex. 
/path/to/headers]) +#if test "X$tac_with_parkway_incdir" != "Xno"; then +# CPPFLAGS="-I${tac_with_parkway_incdir} ${CPPFLAGS}" +#fi + +# ------------------------------------------------------------------------ +# Checks if tests and examples should be built +# ------------------------------------------------------------------------ + + + +# Check whether --enable-tests was given. +if test "${enable_tests+set}" = set; then : + enableval=$enable_tests; ac_cv_use_tests=$enableval +else + ac_cv_use_tests=yes +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use tests" >&5 +$as_echo_n "checking whether to use tests... " >&6; } + +if test "X$ac_cv_use_tests" != "Xno"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +$as_echo "#define HAVE_TESTS /**/" >>confdefs.h + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Check whether --enable-tests was given. +if test "${enable_tests+set}" = set; then : + enableval=$enable_tests; ac_cv_use_tests=$enableval +else + ac_cv_use_tests=yes +fi + + +# Check whether --enable-zoltan-tests was given. +if test "${enable_zoltan_tests+set}" = set; then : + enableval=$enable_zoltan_tests; ac_cv_use_zoltan_tests=$enableval +else + ac_cv_use_zoltan_tests=${ac_cv_use_tests} +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use zoltan-tests" >&5 +$as_echo_n "checking whether to use zoltan-tests... " >&6; } + +if test "X$ac_cv_use_zoltan_tests" != "Xno"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +$as_echo "#define HAVE_ZOLTAN_TESTS /**/" >>confdefs.h + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "X$ac_cv_use_zoltan_tests" != "Xno"; then + BUILD_TESTS_TRUE= + BUILD_TESTS_FALSE='#' +else + BUILD_TESTS_TRUE='#' + BUILD_TESTS_FALSE= +fi + + + +# Check whether --enable-examples was given. +if test "${enable_examples+set}" = set; then : + enableval=$enable_examples; ac_cv_use_examples=$enableval +else + ac_cv_use_examples=yes +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use examples" >&5 +$as_echo_n "checking whether to use examples... " >&6; } + +if test "X$ac_cv_use_examples" != "Xno"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +$as_echo "#define HAVE_EXAMPLES /**/" >>confdefs.h + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Check whether --enable-examples was given. +if test "${enable_examples+set}" = set; then : + enableval=$enable_examples; ac_cv_use_examples=$enableval +else + ac_cv_use_examples=yes +fi + + +# Check whether --enable-zoltan-examples was given. +if test "${enable_zoltan_examples+set}" = set; then : + enableval=$enable_zoltan_examples; ac_cv_use_zoltan_examples=$enableval +else + ac_cv_use_zoltan_examples=${ac_cv_use_examples} +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use zoltan-examples" >&5 +$as_echo_n "checking whether to use zoltan-examples... 
" >&6; } + +if test "X$ac_cv_use_zoltan_examples" != "Xno"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +$as_echo "#define HAVE_ZOLTAN_EXAMPLES /**/" >>confdefs.h + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "X$ac_cv_use_zoltan_examples" != "Xno"; then + BUILD_EXAMPLES_TRUE= + BUILD_EXAMPLES_FALSE='#' +else + BUILD_EXAMPLES_TRUE='#' + BUILD_EXAMPLES_FALSE= +fi + + +#We now build tests and examples through separate make targets, rather than +#during "make". We still need to conditionally include the test and example +#in SUBDIRS, even though SUB_TEST and SUB_EXAMPLE will never be +#defined, so that the tests and examples are included in the distribution +#tarball. + if test "X$ac_cv_use_sub_test" = "Xyes"; then + SUB_TEST_TRUE= + SUB_TEST_FALSE='#' +else + SUB_TEST_TRUE='#' + SUB_TEST_FALSE= +fi + + if test "X$ac_cv_use_sub_example" = "Xyes"; then + SUB_EXAMPLE_TRUE= + SUB_EXAMPLE_FALSE='#' +else + SUB_EXAMPLE_TRUE='#' + SUB_EXAMPLE_FALSE= +fi + + +#TAC_ARG_ENABLE_FEATURE(libcheck, [Check for some third-party libraries. (Cannot be disabled unless tests and examples are also disabled.)], LIBCHECK, yes) + +# ------------------------------------------------------------------------ +# Specify other directories +# ------------------------------------------------------------------------ + +# enable use of --with-libdirs="-Llibdir1 -Llibdir2 ..." to prepend to LDFLAGS + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether additional library search paths defined" >&5 +$as_echo_n "checking whether additional library search paths defined... " >&6; } + +# Check whether --with-libdirs was given. +if test "${with_libdirs+set}" = set; then : + withval=$with_libdirs; +LDFLAGS="${withval} ${LDFLAGS}" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${withval}" >&5 +$as_echo "${withval}" >&6; } + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi + + +# enable use of --with-incdirs="-Iincdir1 -Iincdir2 ..." to prepend to CPPFLAGS + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether additional include search paths defined" >&5 +$as_echo_n "checking whether additional include search paths defined... " >&6; } + +# Check whether --with-incdirs was given. +if test "${with_incdirs+set}" = set; then : + withval=$with_incdirs; +CPPFLAGS="${withval} ${CPPFLAGS}" +CFLAGS="${withval} ${CFLAGS}" +FCFLAGS="${withval} ${FCFLAGS}" +FFLAGS="${withval} ${FFLAGS}" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${withval}" >&5 +$as_echo "${withval}" >&6; } + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi + + + +# ------------------------------------------------------------------------ +# Checks for libraries +# ------------------------------------------------------------------------ + +ax_cv_f90_modulecase="lower" +FC_VENDOR="none" + +# Define F77_FUNC that will be used to link with Fortran subroutines. +if test "X$ac_cv_use_fortran" != "Xno"; then + +ac_ext=${ac_fc_srcext-f} +ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' +ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_fc_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to get verbose linking output from $FC" >&5 +$as_echo_n "checking how to get verbose linking output from $FC... 
" >&6; } +if ${ac_cv_prog_fc_v+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat > conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF +if ac_fn_fc_try_compile "$LINENO"; then : + ac_cv_prog_fc_v= +# Try some options frequently used verbose output +for ac_verb in -v -verbose --verbose -V -\#\#\#; do + cat > conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF + +# Compile and link our simple test program by passing a flag (argument +# 1 to this macro) to the Fortran compiler in order to get +# "verbose" output that we can then parse for the Fortran linker +# flags. +ac_save_FCFLAGS=$FCFLAGS +FCFLAGS="$FCFLAGS $ac_verb" +eval "set x $ac_link" +shift +$as_echo "$as_me:${as_lineno-$LINENO}: $*" >&5 +# gfortran 4.3 outputs lines setting COLLECT_GCC_OPTIONS, COMPILER_PATH, +# LIBRARY_PATH; skip all such settings. +ac_fc_v_output=`eval $ac_link 5>&1 2>&1 | + sed '/^Driving:/d; /^Configured with:/d; + '"/^[_$as_cr_Letters][_$as_cr_alnum]*=/d"` +$as_echo "$ac_fc_v_output" >&5 +FCFLAGS=$ac_save_FCFLAGS + +rm -rf conftest* + +# On HP/UX there is a line like: "LPATH is: /foo:/bar:/baz" where +# /foo, /bar, and /baz are search directories for the Fortran linker. +# Here, we change these into -L/foo -L/bar -L/baz (and put it first): +ac_fc_v_output="`echo $ac_fc_v_output | + grep 'LPATH is:' | + sed 's|.*LPATH is\(: *[^ ]*\).*|\1|;s|: */| -L/|g'` $ac_fc_v_output" + +# FIXME: we keep getting bitten by quoted arguments; a more general fix +# that detects unbalanced quotes in FLIBS should be implemented +# and (ugh) tested at some point. +case $ac_fc_v_output in + # If we are using xlf then replace all the commas with spaces. + *xlfentry*) + ac_fc_v_output=`echo $ac_fc_v_output | sed 's/,/ /g'` ;; + + # With Intel ifc, ignore the quoted -mGLOB_options_string stuff (quoted + # $LIBS confuse us, and the libraries appear later in the output anyway). + *mGLOB_options_string*) + ac_fc_v_output=`echo $ac_fc_v_output | sed 's/"-mGLOB[^"]*"/ /g'` ;; + + # Portland Group compiler has singly- or doubly-quoted -cmdline argument + # Singly-quoted arguments were reported for versions 5.2-4 and 6.0-4. + # Doubly-quoted arguments were reported for "PGF90/x86 Linux/x86 5.0-2". + *-cmdline\ * | *-ignore\ * | *-def\ *) + ac_fc_v_output=`echo $ac_fc_v_output | sed "\ + s/-cmdline *'[^']*'/ /g; s/-cmdline *\"[^\"]*\"/ /g + s/-ignore *'[^']*'/ /g; s/-ignore *\"[^\"]*\"/ /g + s/-def *'[^']*'/ /g; s/-def *\"[^\"]*\"/ /g"` ;; + + # If we are using Cray Fortran then delete quotes. + *cft90*) + ac_fc_v_output=`echo $ac_fc_v_output | sed 's/"//g'` ;; +esac + + + # look for -l* and *.a constructs in the output + for ac_arg in $ac_fc_v_output; do + case $ac_arg in + [\\/]*.a | ?:[\\/]*.a | -[lLRu]*) + ac_cv_prog_fc_v=$ac_verb + break 2 ;; + esac + done +done +if test -z "$ac_cv_prog_fc_v"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cannot determine how to obtain linking information from $FC" >&5 +$as_echo "$as_me: WARNING: cannot determine how to obtain linking information from $FC" >&2;} +fi +else + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: compilation failed" >&5 +$as_echo "$as_me: WARNING: compilation failed" >&2;} +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_fc_v" >&5 +$as_echo "$ac_cv_prog_fc_v" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Fortran libraries of $FC" >&5 +$as_echo_n "checking for Fortran libraries of $FC... 
" >&6; } +if ${ac_cv_fc_libs+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "x$FCLIBS" != "x"; then + ac_cv_fc_libs="$FCLIBS" # Let the user override the test. +else + +cat > conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF + +# Compile and link our simple test program by passing a flag (argument +# 1 to this macro) to the Fortran compiler in order to get +# "verbose" output that we can then parse for the Fortran linker +# flags. +ac_save_FCFLAGS=$FCFLAGS +FCFLAGS="$FCFLAGS $ac_cv_prog_fc_v" +eval "set x $ac_link" +shift +$as_echo "$as_me:${as_lineno-$LINENO}: $*" >&5 +# gfortran 4.3 outputs lines setting COLLECT_GCC_OPTIONS, COMPILER_PATH, +# LIBRARY_PATH; skip all such settings. +ac_fc_v_output=`eval $ac_link 5>&1 2>&1 | + sed '/^Driving:/d; /^Configured with:/d; + '"/^[_$as_cr_Letters][_$as_cr_alnum]*=/d"` +$as_echo "$ac_fc_v_output" >&5 +FCFLAGS=$ac_save_FCFLAGS + +rm -rf conftest* + +# On HP/UX there is a line like: "LPATH is: /foo:/bar:/baz" where +# /foo, /bar, and /baz are search directories for the Fortran linker. +# Here, we change these into -L/foo -L/bar -L/baz (and put it first): +ac_fc_v_output="`echo $ac_fc_v_output | + grep 'LPATH is:' | + sed 's|.*LPATH is\(: *[^ ]*\).*|\1|;s|: */| -L/|g'` $ac_fc_v_output" + +# FIXME: we keep getting bitten by quoted arguments; a more general fix +# that detects unbalanced quotes in FLIBS should be implemented +# and (ugh) tested at some point. +case $ac_fc_v_output in + # If we are using xlf then replace all the commas with spaces. + *xlfentry*) + ac_fc_v_output=`echo $ac_fc_v_output | sed 's/,/ /g'` ;; + + # With Intel ifc, ignore the quoted -mGLOB_options_string stuff (quoted + # $LIBS confuse us, and the libraries appear later in the output anyway). + *mGLOB_options_string*) + ac_fc_v_output=`echo $ac_fc_v_output | sed 's/"-mGLOB[^"]*"/ /g'` ;; + + # Portland Group compiler has singly- or doubly-quoted -cmdline argument + # Singly-quoted arguments were reported for versions 5.2-4 and 6.0-4. + # Doubly-quoted arguments were reported for "PGF90/x86 Linux/x86 5.0-2". + *-cmdline\ * | *-ignore\ * | *-def\ *) + ac_fc_v_output=`echo $ac_fc_v_output | sed "\ + s/-cmdline *'[^']*'/ /g; s/-cmdline *\"[^\"]*\"/ /g + s/-ignore *'[^']*'/ /g; s/-ignore *\"[^\"]*\"/ /g + s/-def *'[^']*'/ /g; s/-def *\"[^\"]*\"/ /g"` ;; + + # If we are using Cray Fortran then delete quotes. + *cft90*) + ac_fc_v_output=`echo $ac_fc_v_output | sed 's/"//g'` ;; +esac + + + +ac_cv_fc_libs= + +# Save positional arguments (if any) +ac_save_positional="$@" + +set X $ac_fc_v_output +while test $# != 1; do + shift + ac_arg=$1 + case $ac_arg in + [\\/]*.a | ?:[\\/]*.a) + ac_exists=false + for ac_i in $ac_cv_fc_libs; do + if test x"$ac_arg" = x"$ac_i"; then + ac_exists=true + break + fi + done + + if test x"$ac_exists" = xtrue; then : + +else + ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" +fi + ;; + -bI:*) + ac_exists=false + for ac_i in $ac_cv_fc_libs; do + if test x"$ac_arg" = x"$ac_i"; then + ac_exists=true + break + fi + done + + if test x"$ac_exists" = xtrue; then : + +else + if test "$ac_compiler_gnu" = yes; then + for ac_link_opt in $ac_arg; do + ac_cv_fc_libs="$ac_cv_fc_libs -Xlinker $ac_link_opt" + done +else + ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" +fi +fi + ;; + # Ignore these flags. + -lang* | -lcrt*.o | -lc | -lgcc* | -lSystem | -libmil | -little \ + |-LANG:=* | -LIST:* | -LNO:* | -link) + ;; + -lkernel32) + test x"$CYGWIN" != xyes && ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" + ;; + -[LRuYz]) + # These flags, when seen by themselves, take an argument. 
+ # We remove the space between option and argument and re-iterate + # unless we find an empty arg or a new option (starting with -) + case $2 in + "" | -*);; + *) + ac_arg="$ac_arg$2" + shift; shift + set X $ac_arg "$@" + ;; + esac + ;; + -YP,*) + for ac_j in `$as_echo "$ac_arg" | sed -e 's/-YP,/-L/;s/:/ -L/g'`; do + ac_exists=false + for ac_i in $ac_cv_fc_libs; do + if test x"$ac_j" = x"$ac_i"; then + ac_exists=true + break + fi + done + + if test x"$ac_exists" = xtrue; then : + +else + ac_arg="$ac_arg $ac_j" + ac_cv_fc_libs="$ac_cv_fc_libs $ac_j" +fi + done + ;; + -[lLR]*) + ac_exists=false + for ac_i in $ac_cv_fc_libs; do + if test x"$ac_arg" = x"$ac_i"; then + ac_exists=true + break + fi + done + + if test x"$ac_exists" = xtrue; then : + +else + ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" +fi + ;; + -zallextract*| -zdefaultextract) + ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" + ;; + # Ignore everything else. + esac +done +# restore positional arguments +set X $ac_save_positional; shift + +# We only consider "LD_RUN_PATH" on Solaris systems. If this is seen, +# then we insist that the "run path" must be an absolute path (i.e. it +# must begin with a "/"). +case `(uname -sr) 2>/dev/null` in + "SunOS 5"*) + ac_ld_run_path=`$as_echo "$ac_fc_v_output" | + sed -n 's,^.*LD_RUN_PATH *= *\(/[^ ]*\).*$,-R\1,p'` + test "x$ac_ld_run_path" != x && + if test "$ac_compiler_gnu" = yes; then + for ac_link_opt in $ac_ld_run_path; do + ac_cv_fc_libs="$ac_cv_fc_libs -Xlinker $ac_link_opt" + done +else + ac_cv_fc_libs="$ac_cv_fc_libs $ac_ld_run_path" +fi + ;; +esac +fi # test "x$[]_AC_LANG_PREFIX[]LIBS" = "x" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_libs" >&5 +$as_echo "$ac_cv_fc_libs" >&6; } +FCLIBS="$ac_cv_fc_libs" + + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + +ac_ext=${ac_fc_srcext-f} +ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' +ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_fc_compiler_gnu + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for dummy main to link with Fortran libraries" >&5 +$as_echo_n "checking for dummy main to link with Fortran libraries... " >&6; } +if ${ac_cv_fc_dummy_main+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_fc_dm_save_LIBS=$LIBS + LIBS="$LIBS $FCLIBS" + ac_fortran_dm_var=FC_DUMMY_MAIN + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + # First, try linking without a dummy main: + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#ifdef FC_DUMMY_MAIN +#ifndef FC_DUMMY_MAIN_EQ_F77 +# ifdef __cplusplus + extern "C" +# endif + int FC_DUMMY_MAIN() { return 1; } +#endif +#endif +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_fortran_dummy_main=none +else + ac_cv_fortran_dummy_main=unknown +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + + if test $ac_cv_fortran_dummy_main = unknown; then + for ac_func in MAIN__ MAIN_ __main MAIN _MAIN __MAIN main_ main__ _main; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#define $ac_fortran_dm_var $ac_func +#ifdef FC_DUMMY_MAIN +#ifndef FC_DUMMY_MAIN_EQ_F77 +# ifdef __cplusplus + extern "C" +# endif + int FC_DUMMY_MAIN() { return 1; } +#endif +#endif +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_fortran_dummy_main=$ac_func; break +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + done + fi + ac_ext=${ac_fc_srcext-f} +ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' +ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_fc_compiler_gnu + ac_cv_fc_dummy_main=$ac_cv_fortran_dummy_main + rm -rf conftest* + LIBS=$ac_fc_dm_save_LIBS + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_dummy_main" >&5 +$as_echo "$ac_cv_fc_dummy_main" >&6; } +FC_DUMMY_MAIN=$ac_cv_fc_dummy_main +if test "$FC_DUMMY_MAIN" != unknown; then : + if test $FC_DUMMY_MAIN != none; then + +cat >>confdefs.h <<_ACEOF +#define FC_DUMMY_MAIN $FC_DUMMY_MAIN +_ACEOF + + if test "x$ac_cv_fc_dummy_main" = "x$ac_cv_f77_dummy_main"; then + +$as_echo "#define FC_DUMMY_MAIN_EQ_F77 1" >>confdefs.h + + fi +fi +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "linking to Fortran libraries from C fails +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +ac_ext=${ac_fc_srcext-f} +ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' +ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_fc_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Fortran name-mangling scheme" >&5 +$as_echo_n "checking for Fortran name-mangling scheme... " >&6; } +if ${ac_cv_fc_mangling+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat > conftest.$ac_ext <<_ACEOF + subroutine foobar() + return + end + subroutine foo_bar() + return + end +_ACEOF +if ac_fn_fc_try_compile "$LINENO"; then : + mv conftest.$ac_objext cfortran_test.$ac_objext + + ac_save_LIBS=$LIBS + LIBS="cfortran_test.$ac_objext $LIBS $FCLIBS" + + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + ac_success=no + for ac_foobar in foobar FOOBAR; do + for ac_underscore in "" "_"; do + ac_func="$ac_foobar$ac_underscore" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char $ac_func (); +#ifdef FC_DUMMY_MAIN +#ifndef FC_DUMMY_MAIN_EQ_F77 +# ifdef __cplusplus + extern "C" +# endif + int FC_DUMMY_MAIN() { return 1; } +#endif +#endif +int +main () +{ +return $ac_func (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_success=yes; break 2 +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + done + done + ac_ext=${ac_fc_srcext-f} +ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' +ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_fc_compiler_gnu + + if test "$ac_success" = "yes"; then + case $ac_foobar in + foobar) + ac_case=lower + ac_foo_bar=foo_bar + ;; + FOOBAR) + ac_case=upper + ac_foo_bar=FOO_BAR + ;; + esac + + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + ac_success_extra=no + for ac_extra in "" "_"; do + ac_func="$ac_foo_bar$ac_underscore$ac_extra" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char $ac_func (); +#ifdef FC_DUMMY_MAIN +#ifndef FC_DUMMY_MAIN_EQ_F77 +# ifdef __cplusplus + extern "C" +# endif + int FC_DUMMY_MAIN() { return 1; } +#endif +#endif +int +main () +{ +return $ac_func (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_success_extra=yes; break +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + done + ac_ext=${ac_fc_srcext-f} +ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' +ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_fc_compiler_gnu + + if test "$ac_success_extra" = "yes"; then + ac_cv_fc_mangling="$ac_case case" + if test -z "$ac_underscore"; then + ac_cv_fc_mangling="$ac_cv_fc_mangling, no underscore" + else + ac_cv_fc_mangling="$ac_cv_fc_mangling, underscore" + fi + if test -z "$ac_extra"; then + ac_cv_fc_mangling="$ac_cv_fc_mangling, no extra underscore" + else + ac_cv_fc_mangling="$ac_cv_fc_mangling, extra underscore" + fi + else + ac_cv_fc_mangling="unknown" + fi + else + ac_cv_fc_mangling="unknown" + fi + + LIBS=$ac_save_LIBS + rm -rf conftest* + rm -f cfortran_test* +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? 
"cannot compile a simple Fortran program +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_mangling" >&5 +$as_echo "$ac_cv_fc_mangling" >&6; } + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +ac_ext=${ac_fc_srcext-f} +ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' +ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_fc_compiler_gnu +case $ac_cv_fc_mangling in + "lower case, no underscore, no extra underscore") + $as_echo "#define FC_FUNC(name,NAME) name" >>confdefs.h + + $as_echo "#define FC_FUNC_(name,NAME) name" >>confdefs.h + ;; + "lower case, no underscore, extra underscore") + $as_echo "#define FC_FUNC(name,NAME) name" >>confdefs.h + + $as_echo "#define FC_FUNC_(name,NAME) name ## _" >>confdefs.h + ;; + "lower case, underscore, no extra underscore") + $as_echo "#define FC_FUNC(name,NAME) name ## _" >>confdefs.h + + $as_echo "#define FC_FUNC_(name,NAME) name ## _" >>confdefs.h + ;; + "lower case, underscore, extra underscore") + $as_echo "#define FC_FUNC(name,NAME) name ## _" >>confdefs.h + + $as_echo "#define FC_FUNC_(name,NAME) name ## __" >>confdefs.h + ;; + "upper case, no underscore, no extra underscore") + $as_echo "#define FC_FUNC(name,NAME) NAME" >>confdefs.h + + $as_echo "#define FC_FUNC_(name,NAME) NAME" >>confdefs.h + ;; + "upper case, no underscore, extra underscore") + $as_echo "#define FC_FUNC(name,NAME) NAME" >>confdefs.h + + $as_echo "#define FC_FUNC_(name,NAME) NAME ## _" >>confdefs.h + ;; + "upper case, underscore, no extra underscore") + $as_echo "#define FC_FUNC(name,NAME) NAME ## _" >>confdefs.h + + $as_echo "#define FC_FUNC_(name,NAME) NAME ## _" >>confdefs.h + ;; + "upper case, underscore, extra underscore") + $as_echo "#define FC_FUNC(name,NAME) NAME ## _" >>confdefs.h + + $as_echo "#define FC_FUNC_(name,NAME) NAME ## __" >>confdefs.h + ;; + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unknown Fortran name-mangling scheme" >&5 +$as_echo "$as_me: WARNING: unknown Fortran name-mangling scheme" >&2;} + ;; +esac + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + ac_ext=${ac_fc_srcext-f} +ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' +ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_fc_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Fortran flag to compile .f files" >&5 +$as_echo_n "checking for Fortran flag to compile .f files... 
" >&6; } +if ${ac_cv_fc_srcext_f+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_ext=f +ac_fcflags_srcext_save=$ac_fcflags_srcext +ac_fcflags_srcext= +ac_cv_fc_srcext_f=unknown +for ac_flag in none -qsuffix=f=f -Tf; do + test "x$ac_flag" != xnone && ac_fcflags_srcext="$ac_flag" + cat > conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF +if ac_fn_fc_try_compile "$LINENO"; then : + ac_cv_fc_srcext_f=$ac_flag; break +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +done +rm -f conftest.$ac_objext conftest.f +ac_fcflags_srcext=$ac_fcflags_srcext_save + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_srcext_f" >&5 +$as_echo "$ac_cv_fc_srcext_f" >&6; } +if test "x$ac_cv_fc_srcext_f" = xunknown; then + as_fn_error $? "Fortran could not compile .f files" "$LINENO" 5 +else + ac_fc_srcext=f + if test "x$ac_cv_fc_srcext_f" = xnone; then + ac_fcflags_srcext="" + FCFLAGS_f="" + else + ac_fcflags_srcext=$ac_cv_fc_srcext_f + FCFLAGS_f=$ac_cv_fc_srcext_f + fi + + +fi +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + ac_ext=${ac_fc_srcext-f} +ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' +ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_fc_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Fortran flag to compile .f90 files" >&5 +$as_echo_n "checking for Fortran flag to compile .f90 files... " >&6; } +if ${ac_cv_fc_srcext_f90+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_ext=f90 +ac_fcflags_srcext_save=$ac_fcflags_srcext +ac_fcflags_srcext= +ac_cv_fc_srcext_f90=unknown +for ac_flag in none -qsuffix=f=f90 -Tf; do + test "x$ac_flag" != xnone && ac_fcflags_srcext="$ac_flag" + cat > conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF +if ac_fn_fc_try_compile "$LINENO"; then : + ac_cv_fc_srcext_f90=$ac_flag; break +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +done +rm -f conftest.$ac_objext conftest.f90 +ac_fcflags_srcext=$ac_fcflags_srcext_save + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_srcext_f90" >&5 +$as_echo "$ac_cv_fc_srcext_f90" >&6; } +if test "x$ac_cv_fc_srcext_f90" = xunknown; then + as_fn_error $? "Fortran could not compile .f90 files" "$LINENO" 5 +else + ac_fc_srcext=f90 + if test "x$ac_cv_fc_srcext_f90" = xnone; then + ac_fcflags_srcext="" + FCFLAGS_f90="" + else + ac_fcflags_srcext=$ac_cv_fc_srcext_f90 + FCFLAGS_f90=$ac_cv_fc_srcext_f90 + fi + + +fi +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + # Determine compile-line flag for F90 modules (e.g., -M). + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking fortran 90 modules inclusion flag" >&5 +$as_echo_n "checking fortran 90 modules inclusion flag... 
" >&6; } +if ${ax_cv_f90_modflag+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_ext=${ac_fc_srcext-f} +ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' +ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_fc_compiler_gnu + +i=0 +while test \( -f tmpdir_$i \) -o \( -d tmpdir_$i \) ; do + i=`expr $i + 1` +done +mkdir tmpdir_$i +cd tmpdir_$i +cat > conftest.$ac_ext <<_ACEOF + +!234567 + module conftest_module + contains + subroutine conftest_routine + write(*,'(a)') 'gotcha!' + end subroutine conftest_routine + end module conftest_module + +_ACEOF +if ac_fn_fc_try_compile "$LINENO"; then : + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +cd .. +ax_cv_f90_modflag="not found" +for ax_flag in "-I " "-M" "-p"; do + if test "$ax_cv_f90_modflag" = "not found" ; then + ax_save_FCFLAGS="$FCFLAGS" + FCFLAGS="$ax_save_FCFLAGS ${ax_flag}tmpdir_$i" + cat > conftest.$ac_ext <<_ACEOF + +!234567 + program conftest_program + use conftest_module + call conftest_routine + end program conftest_program + +_ACEOF +if ac_fn_fc_try_compile "$LINENO"; then : + ax_cv_f90_modflag="$ax_flag" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + FCFLAGS="$ax_save_FCFLAGS" + fi +done +rm -fr tmpdir_$i +if test "$ax_cv_f90_modflag" = "not found" ; then + as_fn_error $? "unable to find compiler flag for modules inclusion" "$LINENO" 5 +fi +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_f90_modflag" >&5 +$as_echo "$ax_cv_f90_modflag" >&6; } + if test "X$ax_cv_f90_modflag" = "Xunknown" ; then + as_fn_error $? "unable to find f90 modules extension" "$LINENO" 5 + else + FCFLAGS="$ax_cv_f90_modflag../ $ax_cv_f90_modflag. ${FCFLAGS}" + fi + # Determine case (upper or lower) of F90 module files. + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking fortran 90 module file suffix and case" >&5 +$as_echo_n "checking fortran 90 module file suffix and case... " >&6; } +if ${ax_cv_f90_modulecase+:} false; then : + $as_echo_n "(cached) " >&6 +else + +rm -f conftest* +cat >conftest.f < conftest.out 2>&1 ; then + FCMODSUFFIX=`ls conftest* | grep -v conftest.f | grep -v conftest.o` + echo "KDDKDD CASE 2" ${FCMODSUFFIX} + FCMODSUFFIX=`echo "${FCMODSUFFIX}" | sed -e 's/conftest\.//g'` + if test -z "${FCMODSUFFIX}" ; then + FCMODSUFFIX=`ls CONFTEST* 2>/dev/null \ + | grep -v CONFTEST.f | grep -v CONFTEST.o` + FCMODSUFFIX=`echo "${FCMODSUFFIX}" | sed -e 's/CONFTEST\.//g'` + if test -n "${FCMODSUFFIX}" ; then + testname="CONFTEST" + modcase="upper" + fi + fi + if test -z "${FCMODSUFFIX}" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unknown" >&5 +$as_echo "unknown" >&6; } + # Use mod if we can't figure it out + FCMODSUFFIX="mod" + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${FCMODSUFFIX}" >&5 +$as_echo "${FCMODSUFFIX}" >&6; } + fi +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unknown" >&5 +$as_echo "unknown" >&6; } +fi +#AC_SUBST(FCMODSUFFIX) +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for case of module names" >&5 +$as_echo_n "checking for case of module names... 
" >&6; } +if test "${modcase}" = "lower" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: lower" >&5 +$as_echo "lower" >&6; } + ax_cv_f90_modulecase="lower" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: upper" >&5 +$as_echo "upper" >&6; } + ax_cv_f90_modulecase="upper" +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_f90_modulecase" >&5 +$as_echo "$ax_cv_f90_modulecase" >&6; } + echo "KDDKDD " $ax_cv_f90_modulecase + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking the compiler ID" >&5 +$as_echo_n "checking the compiler ID... " >&6; } +if ${wk_cv_prog_f90_version_string+:} false; then : + $as_echo_n "(cached) " >&6 +else + $FC -version >conftest.log 2>&1 +$FC -V >>conftest.log 2>&1 +$FC --version >>conftest.log 2>&1 + +wk_grep_f90_NAG=`grep NAG conftest.log | head -1` +wk_grep_f90_Compaq=`grep Compaq conftest.log | head -1` +wk_grep_f90_Digital=`grep DIGITAL conftest.log | head -1` +wk_grep_f90_SGI=`grep MIPS conftest.log | head -1` +wk_grep_f90_Intel=`grep 'Intel(R)' conftest.log | head -1` +wk_grep_f90_Sun=`grep 'Sun' conftest.log | head -1` +wk_grep_f90_Lahey=`grep 'Lahey' conftest.log | head -1` +wk_grep_f90_PGI=`grep 'pgf' conftest.log | head -1` +wk_grep_f90_G95=`grep -i 'g95' conftest.log | grep -i 'gcc' | head -1` +wk_grep_f90_GFORTRAN=`grep -i 'GNU Fortran' conftest.log | head -1` +wk_grep_f90_Absoft=`grep -i 'Absoft' conftest.log | head -1` + +if test -n "$wk_grep_f90_NAG"; then + wk_cv_prog_f90_type="NAG" + wk_cv_prog_f90_version_string=$wk_grep_f90_NAG + wk_cv_prog_f90_version=`echo $wk_cv_prog_f90_version_string | sed -e 's/.* Release \([0-9][0-9]*\.[0-9][0-9]*.*$\)/\1/'` + wk_cv_prog_f90_major_version=`echo $wk_cv_prog_f90_version | sed -e 's/\([0-9][0-9]*\)\..*/\1/'` +elif test -n "$wk_grep_f90_Compaq"; then + wk_cv_prog_f90_type="Compaq" + wk_cv_prog_f90_version_string=$wk_grep_f90_Compaq +elif test -n "$wk_grep_f90_Digital"; then + wk_cv_prog_f90_type="DEC" + wk_cv_prog_f90_version_string=$wk_grep_f90_Digital +elif test -n "$wk_grep_f90_SGI"; then + wk_cv_prog_f90_type="SGI" + wk_cv_prog_f90_version_string=$wk_grep_f90_SGI +elif test -n "$wk_grep_f90_Intel"; then + wk_cv_prog_f90_type="Intel" + wk_cv_prog_f90_version_string=$wk_grep_f90_Intel + wk_cv_prog_f90_version=`echo $wk_cv_prog_f90_version_string | sed -e 's/.* Version \([0-9][0-9]*\.[0-9][0-9]*\) .*/\1/'` + wk_cv_prog_f90_major_version=`echo $wk_cv_prog_f90_version | sed -e 's/\([0-9][0-9]*\)\..*/\1/'` +elif test -n "$wk_grep_f90_Sun"; then + wk_cv_prog_f90_type="Sun" + wk_cv_prog_f90_version_string=$wk_grep_f90_Sun + wk_cv_prog_f90_version=`echo $wk_cv_prog_f90_version_string | sed -e 's/.* Fortran 95 \([0-9][0-9]*\.[0-9][0-9]*\) .*/\1/'` + wk_cv_prog_f90_major_version=`echo $wk_cv_prog_f90_version | sed -e 's/\([0-9][0-9]*\)\..*/\1/'` +elif test -n "$wk_grep_f90_Lahey"; then + wk_cv_prog_f90_type="Lahey" + wk_cv_prog_f90_version_string=$wk_grep_f90_Lahey +elif test -n "$wk_grep_f90_PGI"; then + wk_cv_prog_f90_type="PGI" + wk_cv_prog_f90_version_string=$wk_grep_f90_PGI +elif test -n "$wk_grep_f90_G95"; then + wk_cv_prog_f90_type="G95" + wk_cv_prog_f90_version_string=$wk_grep_f90_G95 +elif test -n "$wk_grep_f90_GFORTRAN"; then + wk_cv_prog_f90_type="GNU" + wk_cv_prog_f90_version_string=$wk_grep_f90_GFORTRAN + wk_cv_prog_f90_version=`echo $wk_cv_prog_f90_version_string | sed -e 's/.*\([0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\).*/\1/'` + wk_cv_prog_f90_major_version=`echo $wk_cv_prog_f90_version | sed -e 's/\([0-9][0-9]*\)\..*/\1/'` +elif test -n "$wk_grep_f90_Absoft"; then + 
wk_cv_prog_f90_type="Absoft" + wk_cv_prog_f90_version_string=$wk_grep_f90_Absoft +else + wk_cv_prog_f90_type="unknown" + wk_cv_prog_f90_version_string="unknown" +fi + +rm -f conftest.log + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $wk_cv_prog_f90_version_string" >&5 +$as_echo "$wk_cv_prog_f90_version_string" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the compiler vendor" >&5 +$as_echo_n "checking the compiler vendor... " >&6; } +if ${wk_cv_prog_f90_type+:} false; then : + $as_echo_n "(cached) " >&6 +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $wk_cv_prog_f90_type" >&5 +$as_echo "$wk_cv_prog_f90_type" >&6; } + +if test -n "$wk_cv_prog_f90_version"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking the compiler version" >&5 +$as_echo_n "checking the compiler version... " >&6; } +if ${wk_cv_prog_f90_version+:} false; then : + $as_echo_n "(cached) " >&6 +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $wk_cv_prog_f90_version" >&5 +$as_echo "$wk_cv_prog_f90_version" >&6; } +else + wk_cv_prog_f90_version=$wk_cv_prog_f90_version_string +fi + +if test -n "$wk_cv_prog_f90_major_version"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking the compiler major version" >&5 +$as_echo_n "checking the compiler major version... " >&6; } +if ${wk_cv_prog_f90_major_version+:} false; then : + $as_echo_n "(cached) " >&6 +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $wk_cv_prog_f90_major_version" >&5 +$as_echo "$wk_cv_prog_f90_major_version" >&6; } +else + wk_cv_prog_f90_major_version=$wk_cv_prog_f90_version +fi + +FC_VERSION_STRING=$wk_cv_prog_f90_version_string +FC_VENDOR=$wk_cv_prog_f90_type +FC_VERSION=$wk_cv_prog_f90_version +FC_MAJOR_VERSION=$wk_cv_prog_f90_major_version + + + + + +FC_MODNAME='$(1:.o=.mod)' +FC_MODNAME_Q='\$(1:.o=.mod)' + + + + + echo "KDDKDD " $FC_VENDOR + if test "$FC_VENDOR" = "PGI"; then + FCFLAGS="-DPGI ${FCFLAGS}" + CFLAGS="-DPGI ${CFLAGS}" + fi + if test "$FC_VENDOR" = "Lahey"; then + FCFLAGS="-DFUJITSU ${FCFLAGS}" + CFLAGS="-DFUJITSU ${CFLAGS}" + fi +fi + if test "X$ax_cv_f90_modulecase" != "Xupper"; then + F90_MODULE_LOWERCASE_TRUE= + F90_MODULE_LOWERCASE_FALSE='#' +else + F90_MODULE_LOWERCASE_TRUE='#' + F90_MODULE_LOWERCASE_FALSE= +fi + + if test "X$FC_VENDOR" == "XNAG"; then + NAG_F90_COMPILER_TRUE= + NAG_F90_COMPILER_FALSE='#' +else + NAG_F90_COMPILER_TRUE='#' + NAG_F90_COMPILER_FALSE= +fi + + +# If tests, examples and libcheck are disabled, we don't have to check +# for these libraries. + +#if test "X$ac_cv_use_new_package_examples" != "Xno" || test "X$ac_cv_use_libcheck" != "Xno"; then +#if test "X$ac_cv_use_zoltan_tests" != "Xno" || test "X$ac_cv_use_zoltan_examples" != "Xno" || test "X$ac_cv_use_libcheck" != "Xno"; then +# checks for libraries now +#AC_SEARCH_LIBS(pow,[m],,AC_MSG_ERROR(Cannot find math library)) +#AC_SEARCH_LIBS(sqrt,[m],,AC_MSG_ERROR(Cannot find math library)) +#fi +# end of the list of libraries that don't need to be checked for if +# tests and examples are disabled. 
+ +# ------------------------------------------------------------------------ +# Checks for linker characteristics +# ------------------------------------------------------------------------ + +# Determine libraries needed for linking with Fortran +#AC_FC_LIBRARY_LDFLAGS +if test "X$ac_cv_use_fortran" = "Xyes"; then +ac_ext=${ac_fc_srcext-f} +ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' +ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_fc_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to get verbose linking output from $FC" >&5 +$as_echo_n "checking how to get verbose linking output from $FC... " >&6; } +if ${ac_cv_prog_fc_v+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat > conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF +if ac_fn_fc_try_compile "$LINENO"; then : + ac_cv_prog_fc_v= +# Try some options frequently used verbose output +for ac_verb in -v -verbose --verbose -V -\#\#\#; do + cat > conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF + +# Compile and link our simple test program by passing a flag (argument +# 1 to this macro) to the Fortran compiler in order to get +# "verbose" output that we can then parse for the Fortran linker +# flags. +ac_save_FCFLAGS=$FCFLAGS +FCFLAGS="$FCFLAGS $ac_verb" +eval "set x $ac_link" +shift +$as_echo "$as_me:${as_lineno-$LINENO}: $*" >&5 +# gfortran 4.3 outputs lines setting COLLECT_GCC_OPTIONS, COMPILER_PATH, +# LIBRARY_PATH; skip all such settings. +ac_fc_v_output=`eval $ac_link 5>&1 2>&1 | + sed '/^Driving:/d; /^Configured with:/d; + '"/^[_$as_cr_Letters][_$as_cr_alnum]*=/d"` +$as_echo "$ac_fc_v_output" >&5 +FCFLAGS=$ac_save_FCFLAGS + +rm -rf conftest* + +# On HP/UX there is a line like: "LPATH is: /foo:/bar:/baz" where +# /foo, /bar, and /baz are search directories for the Fortran linker. +# Here, we change these into -L/foo -L/bar -L/baz (and put it first): +ac_fc_v_output="`echo $ac_fc_v_output | + grep 'LPATH is:' | + sed 's|.*LPATH is\(: *[^ ]*\).*|\1|;s|: */| -L/|g'` $ac_fc_v_output" + +# FIXME: we keep getting bitten by quoted arguments; a more general fix +# that detects unbalanced quotes in FLIBS should be implemented +# and (ugh) tested at some point. +case $ac_fc_v_output in + # If we are using xlf then replace all the commas with spaces. + *xlfentry*) + ac_fc_v_output=`echo $ac_fc_v_output | sed 's/,/ /g'` ;; + + # With Intel ifc, ignore the quoted -mGLOB_options_string stuff (quoted + # $LIBS confuse us, and the libraries appear later in the output anyway). + *mGLOB_options_string*) + ac_fc_v_output=`echo $ac_fc_v_output | sed 's/"-mGLOB[^"]*"/ /g'` ;; + + # Portland Group compiler has singly- or doubly-quoted -cmdline argument + # Singly-quoted arguments were reported for versions 5.2-4 and 6.0-4. + # Doubly-quoted arguments were reported for "PGF90/x86 Linux/x86 5.0-2". + *-cmdline\ * | *-ignore\ * | *-def\ *) + ac_fc_v_output=`echo $ac_fc_v_output | sed "\ + s/-cmdline *'[^']*'/ /g; s/-cmdline *\"[^\"]*\"/ /g + s/-ignore *'[^']*'/ /g; s/-ignore *\"[^\"]*\"/ /g + s/-def *'[^']*'/ /g; s/-def *\"[^\"]*\"/ /g"` ;; + + # If we are using Cray Fortran then delete quotes. 
+ *cft90*) + ac_fc_v_output=`echo $ac_fc_v_output | sed 's/"//g'` ;; +esac + + + # look for -l* and *.a constructs in the output + for ac_arg in $ac_fc_v_output; do + case $ac_arg in + [\\/]*.a | ?:[\\/]*.a | -[lLRu]*) + ac_cv_prog_fc_v=$ac_verb + break 2 ;; + esac + done +done +if test -z "$ac_cv_prog_fc_v"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cannot determine how to obtain linking information from $FC" >&5 +$as_echo "$as_me: WARNING: cannot determine how to obtain linking information from $FC" >&2;} +fi +else + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: compilation failed" >&5 +$as_echo "$as_me: WARNING: compilation failed" >&2;} +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_fc_v" >&5 +$as_echo "$ac_cv_prog_fc_v" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Fortran libraries of $FC" >&5 +$as_echo_n "checking for Fortran libraries of $FC... " >&6; } +if ${ac_cv_fc_libs+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "x$FCLIBS" != "x"; then + ac_cv_fc_libs="$FCLIBS" # Let the user override the test. +else + +cat > conftest.$ac_ext <<_ACEOF + program main + + end +_ACEOF + +# Compile and link our simple test program by passing a flag (argument +# 1 to this macro) to the Fortran compiler in order to get +# "verbose" output that we can then parse for the Fortran linker +# flags. +ac_save_FCFLAGS=$FCFLAGS +FCFLAGS="$FCFLAGS $ac_cv_prog_fc_v" +eval "set x $ac_link" +shift +$as_echo "$as_me:${as_lineno-$LINENO}: $*" >&5 +# gfortran 4.3 outputs lines setting COLLECT_GCC_OPTIONS, COMPILER_PATH, +# LIBRARY_PATH; skip all such settings. +ac_fc_v_output=`eval $ac_link 5>&1 2>&1 | + sed '/^Driving:/d; /^Configured with:/d; + '"/^[_$as_cr_Letters][_$as_cr_alnum]*=/d"` +$as_echo "$ac_fc_v_output" >&5 +FCFLAGS=$ac_save_FCFLAGS + +rm -rf conftest* + +# On HP/UX there is a line like: "LPATH is: /foo:/bar:/baz" where +# /foo, /bar, and /baz are search directories for the Fortran linker. +# Here, we change these into -L/foo -L/bar -L/baz (and put it first): +ac_fc_v_output="`echo $ac_fc_v_output | + grep 'LPATH is:' | + sed 's|.*LPATH is\(: *[^ ]*\).*|\1|;s|: */| -L/|g'` $ac_fc_v_output" + +# FIXME: we keep getting bitten by quoted arguments; a more general fix +# that detects unbalanced quotes in FLIBS should be implemented +# and (ugh) tested at some point. +case $ac_fc_v_output in + # If we are using xlf then replace all the commas with spaces. + *xlfentry*) + ac_fc_v_output=`echo $ac_fc_v_output | sed 's/,/ /g'` ;; + + # With Intel ifc, ignore the quoted -mGLOB_options_string stuff (quoted + # $LIBS confuse us, and the libraries appear later in the output anyway). + *mGLOB_options_string*) + ac_fc_v_output=`echo $ac_fc_v_output | sed 's/"-mGLOB[^"]*"/ /g'` ;; + + # Portland Group compiler has singly- or doubly-quoted -cmdline argument + # Singly-quoted arguments were reported for versions 5.2-4 and 6.0-4. + # Doubly-quoted arguments were reported for "PGF90/x86 Linux/x86 5.0-2". + *-cmdline\ * | *-ignore\ * | *-def\ *) + ac_fc_v_output=`echo $ac_fc_v_output | sed "\ + s/-cmdline *'[^']*'/ /g; s/-cmdline *\"[^\"]*\"/ /g + s/-ignore *'[^']*'/ /g; s/-ignore *\"[^\"]*\"/ /g + s/-def *'[^']*'/ /g; s/-def *\"[^\"]*\"/ /g"` ;; + + # If we are using Cray Fortran then delete quotes. 
+ *cft90*) + ac_fc_v_output=`echo $ac_fc_v_output | sed 's/"//g'` ;; +esac + + + +ac_cv_fc_libs= + +# Save positional arguments (if any) +ac_save_positional="$@" + +set X $ac_fc_v_output +while test $# != 1; do + shift + ac_arg=$1 + case $ac_arg in + [\\/]*.a | ?:[\\/]*.a) + ac_exists=false + for ac_i in $ac_cv_fc_libs; do + if test x"$ac_arg" = x"$ac_i"; then + ac_exists=true + break + fi + done + + if test x"$ac_exists" = xtrue; then : + +else + ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" +fi + ;; + -bI:*) + ac_exists=false + for ac_i in $ac_cv_fc_libs; do + if test x"$ac_arg" = x"$ac_i"; then + ac_exists=true + break + fi + done + + if test x"$ac_exists" = xtrue; then : + +else + if test "$ac_compiler_gnu" = yes; then + for ac_link_opt in $ac_arg; do + ac_cv_fc_libs="$ac_cv_fc_libs -Xlinker $ac_link_opt" + done +else + ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" +fi +fi + ;; + # Ignore these flags. + -lang* | -lcrt*.o | -lc | -lgcc* | -lSystem | -libmil | -little \ + |-LANG:=* | -LIST:* | -LNO:* | -link) + ;; + -lkernel32) + test x"$CYGWIN" != xyes && ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" + ;; + -[LRuYz]) + # These flags, when seen by themselves, take an argument. + # We remove the space between option and argument and re-iterate + # unless we find an empty arg or a new option (starting with -) + case $2 in + "" | -*);; + *) + ac_arg="$ac_arg$2" + shift; shift + set X $ac_arg "$@" + ;; + esac + ;; + -YP,*) + for ac_j in `$as_echo "$ac_arg" | sed -e 's/-YP,/-L/;s/:/ -L/g'`; do + ac_exists=false + for ac_i in $ac_cv_fc_libs; do + if test x"$ac_j" = x"$ac_i"; then + ac_exists=true + break + fi + done + + if test x"$ac_exists" = xtrue; then : + +else + ac_arg="$ac_arg $ac_j" + ac_cv_fc_libs="$ac_cv_fc_libs $ac_j" +fi + done + ;; + -[lLR]*) + ac_exists=false + for ac_i in $ac_cv_fc_libs; do + if test x"$ac_arg" = x"$ac_i"; then + ac_exists=true + break + fi + done + + if test x"$ac_exists" = xtrue; then : + +else + ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" +fi + ;; + -zallextract*| -zdefaultextract) + ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" + ;; + # Ignore everything else. + esac +done +# restore positional arguments +set X $ac_save_positional; shift + +# We only consider "LD_RUN_PATH" on Solaris systems. If this is seen, +# then we insist that the "run path" must be an absolute path (i.e. it +# must begin with a "/"). 
+case `(uname -sr) 2>/dev/null` in + "SunOS 5"*) + ac_ld_run_path=`$as_echo "$ac_fc_v_output" | + sed -n 's,^.*LD_RUN_PATH *= *\(/[^ ]*\).*$,-R\1,p'` + test "x$ac_ld_run_path" != x && + if test "$ac_compiler_gnu" = yes; then + for ac_link_opt in $ac_ld_run_path; do + ac_cv_fc_libs="$ac_cv_fc_libs -Xlinker $ac_link_opt" + done +else + ac_cv_fc_libs="$ac_cv_fc_libs $ac_ld_run_path" +fi + ;; +esac +fi # test "x$[]_AC_LANG_PREFIX[]LIBS" = "x" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_libs" >&5 +$as_echo "$ac_cv_fc_libs" >&6; } +FCLIBS="$ac_cv_fc_libs" + + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +fi + +# ------------------------------------------------------------------------ +# Zoltan may be built via autotools, cmake, or our native makefile +# ------------------------------------------------------------------------ + +CPPFLAGS="-DAUTOTOOLS_BUILD ${CPPFLAGS}" + +# ------------------------------------------------------------------------ +# Perform substitutions in output files +# ------------------------------------------------------------------------ + + + +# ------------------------------------------------------------------------ +# Output files +# ------------------------------------------------------------------------ +# +ac_config_files="$ac_config_files Makefile src/Makefile src/driver/Makefile src/fdriver/Makefile Makefile.export.zoltan example/Makefile example/C/Makefile example/CPP/Makefile siMPI/Makefile siMPI/pyMPI/Makefile siMPI/pyMPI/siMPI/Makefile" + +# test/Large_Data/Makefile +# example/lib/Makefile +# example/C/Makefile +# example/C/sparse_matrix/Makefile +# example/CPP/Makefile +# Utilities/Makefile + +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, we kill variables containing newlines. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. 
+( + for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + + (set) 2>&1 | + case $as_nl`(ac_space=' '; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + # `set' does not quote correctly, so add quotes: double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \. + sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; #( + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) | + sed ' + /^ac_cv_env_/b end + t clear + :clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache +if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + if test "x$cache_file" != "x/dev/null"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 +$as_echo "$as_me: updating cache $cache_file" >&6;} + if test ! -f "$cache_file" || test -h "$cache_file"; then + cat confcache >"$cache_file" + else + case $cache_file in #( + */* | ?:*) + mv -f confcache "$cache_file"$$ && + mv -f "$cache_file"$$ "$cache_file" ;; #( + *) + mv -f confcache "$cache_file" ;; + esac + fi + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 +$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +DEFS=-DHAVE_CONFIG_H + +ac_libobjs= +ac_ltlibobjs= +U= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. + ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' + ac_i=`$as_echo "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. + as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" + as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + +if test -z "${MAINTAINER_MODE_TRUE}" && test -z "${MAINTAINER_MODE_FALSE}"; then + as_fn_error $? "conditional \"MAINTAINER_MODE\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${HOST_CYGWIN_TRUE}" && test -z "${HOST_CYGWIN_FALSE}"; then + as_fn_error $? "conditional \"HOST_CYGWIN\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${HOST_LINUX_TRUE}" && test -z "${HOST_LINUX_FALSE}"; then + as_fn_error $? "conditional \"HOST_LINUX\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${HOST_SOLARIS_TRUE}" && test -z "${HOST_SOLARIS_FALSE}"; then + as_fn_error $? "conditional \"HOST_SOLARIS\" was never defined. +Usually this means the macro was only invoked conditionally." 
"$LINENO" 5 +fi +if test -z "${HOST_CYGWIN_TRUE}" && test -z "${HOST_CYGWIN_FALSE}"; then + as_fn_error $? "conditional \"HOST_CYGWIN\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${HOST_LINUX_TRUE}" && test -z "${HOST_LINUX_FALSE}"; then + as_fn_error $? "conditional \"HOST_LINUX\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${HOST_SOLARIS_TRUE}" && test -z "${HOST_SOLARIS_FALSE}"; then + as_fn_error $? "conditional \"HOST_SOLARIS\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi + if test -n "$EXEEXT"; then + am__EXEEXT_TRUE= + am__EXEEXT_FALSE='#' +else + am__EXEEXT_TRUE='#' + am__EXEEXT_FALSE= +fi + +if test -z "${BUILD_ZOLTAN_F90_INTERFACE_TRUE}" && test -z "${BUILD_ZOLTAN_F90_INTERFACE_FALSE}"; then + as_fn_error $? "conditional \"BUILD_ZOLTAN_F90_INTERFACE\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${HAVE_ZOLTAN_CPPDRIVER_TRUE}" && test -z "${HAVE_ZOLTAN_CPPDRIVER_FALSE}"; then + as_fn_error $? "conditional \"HAVE_ZOLTAN_CPPDRIVER\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${HAVE_MPI_TRUE}" && test -z "${HAVE_MPI_FALSE}"; then + as_fn_error $? "conditional \"HAVE_MPI\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then + as_fn_error $? "conditional \"AMDEP\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then + as_fn_error $? "conditional \"am__fastdepCC\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then + as_fn_error $? "conditional \"am__fastdepCXX\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${USE_ALTERNATE_AR_TRUE}" && test -z "${USE_ALTERNATE_AR_FALSE}"; then + as_fn_error $? "conditional \"USE_ALTERNATE_AR\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${USE_ALTERNATE_AR_TRUE}" && test -z "${USE_ALTERNATE_AR_FALSE}"; then + as_fn_error $? "conditional \"USE_ALTERNATE_AR\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${USING_EXPORT_MAKEFILES_TRUE}" && test -z "${USING_EXPORT_MAKEFILES_FALSE}"; then + as_fn_error $? "conditional \"USING_EXPORT_MAKEFILES\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${USING_PERL_TRUE}" && test -z "${USING_PERL_FALSE}"; then + as_fn_error $? "conditional \"USING_PERL\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${USING_GNUMAKE_TRUE}" && test -z "${USING_GNUMAKE_FALSE}"; then + as_fn_error $? "conditional \"USING_GNUMAKE\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_GZIP_TRUE}" && test -z "${BUILD_GZIP_FALSE}"; then + as_fn_error $? "conditional \"BUILD_GZIP\" was never defined. +Usually this means the macro was only invoked conditionally." 
"$LINENO" 5 +fi +if test -z "${BUILD_PARMETIS_TRUE}" && test -z "${BUILD_PARMETIS_FALSE}"; then + as_fn_error $? "conditional \"BUILD_PARMETIS\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_SCOTCH_TRUE}" && test -z "${BUILD_SCOTCH_FALSE}"; then + as_fn_error $? "conditional \"BUILD_SCOTCH\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_TESTS_TRUE}" && test -z "${BUILD_TESTS_FALSE}"; then + as_fn_error $? "conditional \"BUILD_TESTS\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_EXAMPLES_TRUE}" && test -z "${BUILD_EXAMPLES_FALSE}"; then + as_fn_error $? "conditional \"BUILD_EXAMPLES\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${SUB_TEST_TRUE}" && test -z "${SUB_TEST_FALSE}"; then + as_fn_error $? "conditional \"SUB_TEST\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${SUB_EXAMPLE_TRUE}" && test -z "${SUB_EXAMPLE_FALSE}"; then + as_fn_error $? "conditional \"SUB_EXAMPLE\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${F90_MODULE_LOWERCASE_TRUE}" && test -z "${F90_MODULE_LOWERCASE_FALSE}"; then + as_fn_error $? "conditional \"F90_MODULE_LOWERCASE\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${NAG_F90_COMPILER_TRUE}" && test -z "${NAG_F90_COMPILER_FALSE}"; then + as_fn_error $? "conditional \"NAG_F90_COMPILER\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi + +: "${CONFIG_STATUS=./config.status}" +ac_write_fail=0 +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 +$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} +as_write_fail=0 +cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false + +SHELL=\${CONFIG_SHELL-$SHELL} +export SHELL +_ASEOF +cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. 
+if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. 
+as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -p'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -p' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -p' + fi +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. 
+as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +if test -x / >/dev/null 2>&1; then + as_test_x='test -x' +else + if ls -dL / >/dev/null 2>&1; then + as_ls_L_option=L + else + as_ls_L_option= + fi + as_test_x=' + eval sh -c '\'' + if test -d "$1"; then + test -d "$1/."; + else + case $1 in #( + -*)set "./$1";; + esac; + case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( + ???[sx]*):;;*)false;;esac;fi + '\'' sh + ' +fi +as_executable_p=$as_test_x + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 +## ----------------------------------- ## +## Main body of $CONFIG_STATUS script. ## +## ----------------------------------- ## +_ASEOF +test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# Save the log message, to keep $0 and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by Zoltan $as_me 3.6, which was +generated by GNU Autoconf 2.68. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +_ACEOF + +case $ac_config_files in *" +"*) set x $ac_config_files; shift; ac_config_files=$*;; +esac + +case $ac_config_headers in *" +"*) set x $ac_config_headers; shift; ac_config_headers=$*;; +esac + + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# Files that config.status was made for. +config_files="$ac_config_files" +config_headers="$ac_config_headers" +config_commands="$ac_config_commands" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +ac_cs_usage="\ +\`$as_me' instantiates files and other configuration actions +from templates according to the current configuration. Unless the files +and actions are specified as TAGs, all are instantiated by default. + +Usage: $0 [OPTION]... [TAG]... 
+ + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + --config print configuration, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE + +Configuration files: +$config_files + +Configuration headers: +$config_headers + +Configuration commands: +$config_commands + +Report bugs to ." + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" +ac_cs_version="\\ +Zoltan config.status 3.6 +configured by $0, generated by GNU Autoconf 2.68, + with options \\"\$ac_cs_config\\" + +Copyright (C) 2010 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." + +ac_pwd='$ac_pwd' +srcdir='$srcdir' +INSTALL='$INSTALL' +MKDIR_P='$MKDIR_P' +AWK='$AWK' +test -n "\$AWK" || AWK=awk +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# The default lists apply if the user does not specify any file. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=?*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + --*=) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg= + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + $as_echo "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) + $as_echo "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + '') as_fn_error $? "missing file argument" ;; + esac + as_fn_append CONFIG_FILES " '$ac_optarg'" + ac_need_defaults=false;; + --header | --heade | --head | --hea ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + as_fn_append CONFIG_HEADERS " '$ac_optarg'" + ac_need_defaults=false;; + --he | --h) + # Conflict between --help and --header + as_fn_error $? "ambiguous option: \`$1' +Try \`$0 --help' for more information.";; + --help | --hel | -h ) + $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) as_fn_error $? "unrecognized option: \`$1' +Try \`$0 --help' for more information." 
;; + + *) as_fn_append ac_config_targets " $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +if \$ac_cs_recheck; then + set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion + shift + \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 + CONFIG_SHELL='$SHELL' + export CONFIG_SHELL + exec "\$@" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + $as_echo "$ac_log" +} >&5 + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# +# INIT-COMMANDS +# +AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + +# Handling of arguments. +for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "src/include/Zoltan_config.h") CONFIG_HEADERS="$CONFIG_HEADERS src/include/Zoltan_config.h:src/include/Zoltan_config.h.in" ;; + "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;; + "src/driver/Makefile") CONFIG_FILES="$CONFIG_FILES src/driver/Makefile" ;; + "src/fdriver/Makefile") CONFIG_FILES="$CONFIG_FILES src/fdriver/Makefile" ;; + "Makefile.export.zoltan") CONFIG_FILES="$CONFIG_FILES Makefile.export.zoltan" ;; + "example/Makefile") CONFIG_FILES="$CONFIG_FILES example/Makefile" ;; + "example/C/Makefile") CONFIG_FILES="$CONFIG_FILES example/C/Makefile" ;; + "example/CPP/Makefile") CONFIG_FILES="$CONFIG_FILES example/CPP/Makefile" ;; + "siMPI/Makefile") CONFIG_FILES="$CONFIG_FILES siMPI/Makefile" ;; + "siMPI/pyMPI/Makefile") CONFIG_FILES="$CONFIG_FILES siMPI/pyMPI/Makefile" ;; + "siMPI/pyMPI/siMPI/Makefile") CONFIG_FILES="$CONFIG_FILES siMPI/pyMPI/siMPI/Makefile" ;; + + *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers + test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= ac_tmp= + trap 'exit_status=$? + : "${ac_tmp:=$tmp}" + { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status +' 0 + trap 'as_fn_exit 1' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || as_fn_error $? "cannot create a temporary directory in ." 
"$LINENO" 5 +ac_tmp=$tmp + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. +if test -n "$CONFIG_FILES"; then + + +ac_cr=`echo X | tr X '\015'` +# On cygwin, bash can eat \r inside `` if the user requested igncr. +# But we know of no other shell where ac_cr would be empty at this +# point, so we can use a bashism as a fallback. +if test "x$ac_cr" = x; then + eval ac_cr=\$\'\\r\' +fi +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' +else + ac_cs_awk_cr=$ac_cr +fi + +echo 'BEGIN {' >"$ac_tmp/subs1.awk" && +_ACEOF + + +{ + echo "cat >conf$$subs.awk <<_ACEOF" && + echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && + echo "_ACEOF" +} >conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 +ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + . ./conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + + ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` + if test $ac_delim_n = $ac_delim_num; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done +rm -f conf$$subs.sh + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && +_ACEOF +sed -n ' +h +s/^/S["/; s/!.*/"]=/ +p +g +s/^[^!]*!// +:repl +t repl +s/'"$ac_delim"'$// +t delim +:nl +h +s/\(.\{148\}\)..*/\1/ +t more1 +s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ +p +n +b repl +:more1 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t nl +:delim +h +s/\(.\{148\}\)..*/\1/ +t more2 +s/["\\]/\\&/g; s/^/"/; s/$/"/ +p +b +:more2 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t delim +' >$CONFIG_STATUS || ac_write_fail=1 +rm -f conf$$subs.awk +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACAWK +cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + + print line +} + +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" +else + cat +fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ + || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 +_ACEOF + +# VPATH may cause trouble with some makes, so we remove sole $(srcdir), +# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). +if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ +h +s/// +s/^/:/ +s/[ ]*$/:/ +s/:\$(srcdir):/:/g +s/:\${srcdir}:/:/g +s/:@srcdir@:/:/g +s/^:*// +s/:*$// +x +s/\(=[ ]*\).*/\1/ +G +s/\n// +s/^[^=]*=[ ]*$// +}' +fi + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +fi # test -n "$CONFIG_FILES" + +# Set up the scripts for CONFIG_HEADERS section. +# No need to generate them if there are no CONFIG_HEADERS. 
+# This happens for instance with `./config.status Makefile'. +if test -n "$CONFIG_HEADERS"; then +cat >"$ac_tmp/defines.awk" <<\_ACAWK || +BEGIN { +_ACEOF + +# Transform confdefs.h into an awk script `defines.awk', embedded as +# here-document in config.status, that substitutes the proper values into +# config.h.in to produce config.h. + +# Create a delimiter string that does not exist in confdefs.h, to ease +# handling of long lines. +ac_delim='%!_!# ' +for ac_last_try in false false :; do + ac_tt=`sed -n "/$ac_delim/p" confdefs.h` + if test -z "$ac_tt"; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done + +# For the awk script, D is an array of macro values keyed by name, +# likewise P contains macro parameters if any. Preserve backslash +# newline sequences. + +ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* +sed -n ' +s/.\{148\}/&'"$ac_delim"'/g +t rset +:rset +s/^[ ]*#[ ]*define[ ][ ]*/ / +t def +d +:def +s/\\$// +t bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3"/p +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p +d +:bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3\\\\\\n"\\/p +t cont +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p +t cont +d +:cont +n +s/.\{148\}/&'"$ac_delim"'/g +t clear +:clear +s/\\$// +t bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/"/p +d +:bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p +b cont +' >$CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + for (key in D) D_is_set[key] = 1 + FS = "" +} +/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { + line = \$ 0 + split(line, arg, " ") + if (arg[1] == "#") { + defundef = arg[2] + mac1 = arg[3] + } else { + defundef = substr(arg[1], 2) + mac1 = arg[2] + } + split(mac1, mac2, "(") #) + macro = mac2[1] + prefix = substr(line, 1, index(line, defundef) - 1) + if (D_is_set[macro]) { + # Preserve the white space surrounding the "#". + print prefix "define", macro P[macro] D[macro] + next + } else { + # Replace #undef with comments. This is necessary, for example, + # in the case of _POSIX_SOURCE, which is predefined and required + # on some systems where configure will not decide to define it. + if (defundef == "undef") { + print "/*", prefix defundef, macro, "*/" + next + } + } +} +{ print } +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 +fi # test -n "$CONFIG_HEADERS" + + +eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$ac_tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. 
+ test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac + case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. + case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir="$ac_dir"; as_fn_mkdir_p + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac + ac_MKDIR_P=$MKDIR_P + case $MKDIR_P in + [\\/$]* | ?:[\\/]* ) ;; + */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; + esac +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. 
+ac_datarootdir_hack=; ac_datarootdir_seen= +ac_sed_dataroot=' +/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + ac_datarootdir_hack=' + s&@datadir@&$datadir&g + s&@docdir@&$docdir&g + s&@infodir@&$infodir&g + s&@localedir@&$localedir&g + s&@mandir@&$mandir&g + s&\\\${datarootdir}&$datarootdir&g' ;; +esac +_ACEOF + +# Neutralize VPATH when `$srcdir' = `.'. +# Shell code in configure.ac might set extrasub. +# FIXME: do we really want to maintain this feature? +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_sed_extra="$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +s&@MKDIR_P@&$ac_MKDIR_P&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ + >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&5 +$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&2;} + + rm -f "$ac_tmp/stdin" + case $ac_file in + -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; + *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; + esac \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + ;; + :H) + # + # CONFIG_HEADER + # + if test x"$ac_file" != x-; then + { + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" + } >"$ac_tmp/config.h" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then + { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 +$as_echo "$as_me: $ac_file is unchanged" >&6;} + else + rm -f "$ac_file" + mv "$ac_tmp/config.h" "$ac_file" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + fi + else + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ + || as_fn_error $? "could not create -" "$LINENO" 5 + fi +# Compute "$ac_file"'s index in $config_headers. 
+_am_arg="$ac_file" +_am_stamp_count=1 +for _am_header in $config_headers :; do + case $_am_header in + $_am_arg | $_am_arg:* ) + break ;; + * ) + _am_stamp_count=`expr $_am_stamp_count + 1` ;; + esac +done +echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || +$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$_am_arg" : 'X\(//\)[^/]' \| \ + X"$_am_arg" : 'X\(//\)$' \| \ + X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$_am_arg" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'`/stamp-h$_am_stamp_count + ;; + + :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 +$as_echo "$as_me: executing $ac_file commands" >&6;} + ;; + esac + + + case $ac_file$ac_mode in + "depfiles":C) test x"$AMDEP_TRUE" != x"" || { + # Autoconf 2.62 quotes --file arguments for eval, but not when files + # are listed without --file. Let's play safe and only enable the eval + # if we detect the quoting. + case $CONFIG_FILES in + *\'*) eval set x "$CONFIG_FILES" ;; + *) set x $CONFIG_FILES ;; + esac + shift + for mf + do + # Strip MF so we end up with the name of the file. + mf=`echo "$mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile or not. + # We used to match only the files named `Makefile.in', but + # some people rename them; so instead we look at the file content. + # Grep'ing the first line is not enough: some people post-process + # each Makefile.in and add a new line on top of each file to say so. + # Grep'ing the whole file is not good either: AIX grep has a line + # limit of 2048, but all sed's we know have understand at least 4000. + if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then + dirpart=`$as_dirname -- "$mf" || +$as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$mf" : 'X\(//\)[^/]' \| \ + X"$mf" : 'X\(//\)$' \| \ + X"$mf" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$mf" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + else + continue + fi + # Extract the definition of DEPDIR, am__include, and am__quote + # from the Makefile without running `make'. + DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` + test -z "$DEPDIR" && continue + am__include=`sed -n 's/^am__include = //p' < "$mf"` + test -z "am__include" && continue + am__quote=`sed -n 's/^am__quote = //p' < "$mf"` + # When using ansi2knr, U may be empty or an underscore; expand it + U=`sed -n 's/^U = //p' < "$mf"` + # Find all dependency output files, they are included files with + # $(DEPDIR) in their names. We invoke sed twice because it is the + # simplest approach to changing $(DEPDIR) to its actual value in the + # expansion. + for file in `sed -n " + s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ + sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do + # Make sure the directory exists. + test -f "$dirpart/$file" && continue + fdir=`$as_dirname -- "$file" || +$as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$file" : 'X\(//\)[^/]' \| \ + X"$file" : 'X\(//\)$' \| \ + X"$file" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X"$file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir=$dirpart/$fdir; as_fn_mkdir_p + # echo "creating $dirpart/$file" + echo '# dummy' > "$dirpart/$file" + done + done +} + ;; + + esac +done # for ac_tag + + +as_fn_exit 0 +_ACEOF +ac_clean_files=$ac_clean_files_save + +test $ac_write_fail = 0 || + as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. +# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. + $ac_cs_success || as_fn_exit 1 +fi +if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 +$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} +fi + + +echo "---------------------------------------------" +echo "Finished Running Zoltan Configure Script" +echo "---------------------------------------------" + diff --git a/Zoltan-3.90/configure.ac b/Zoltan-3.90/configure.ac new file mode 100644 index 00000000..37509602 --- /dev/null +++ b/Zoltan-3.90/configure.ac @@ -0,0 +1,463 @@ +# @HEADER +# +######################################################################## +# +# Zoltan Toolkit for Load-balancing, Partitioning, Ordering and Coloring +# Copyright 2012 Sandia Corporation +# +# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, +# the U.S. Government retains certain rights in this software. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# 3. Neither the name of the Corporation nor the names of the +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL SANDIA CORPORATION OR THE +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Questions? Contact Karen Devine kddevin@sandia.gov +# Erik Boman egboman@sandia.gov +# +######################################################################## +# +# @HEADER +# ------------------------------------------------------------------------ +# Process this file with autoconf to produce a configure script. +# ------------------------------------------------------------------------ + +# ------------------------------------------------------------------------ +# Initialization +# ------------------------------------------------------------------------ + + + +# This must be the first line in configure.ac. +# Optional 3rd argument is email address for bugs. + +AC_INIT(Zoltan, 3.6, lriesen@sandia.gov) + +echo "----------------------------------------" +echo "Running Zoltan Configure Script" +echo "----------------------------------------" + +# This is to protect against accidentally specifying the wrong +# directory with --srcdir. Any file in that directory will do, +# preferably one that is unlikely to be removed or renamed. + +AC_CONFIG_SRCDIR([src/include/zoltan.h]) + +# Specify directory for auxillary build tools (e.g., install-sh, +# config.sub, config.guess) and M4 files. + +AC_CONFIG_AUX_DIR(config) + +# +# We don't want people to configure in the source directory. Some +# things may break. +# + +if test -e configure.ac ; then + echo "You are trying to run configure in the source directory. This is not allowed. Please run configure from a separate build directory." + exit +fi + +# Configure should create src/Zoltan_config.h from src/Zoltan_config.h.in +AM_CONFIG_HEADER(src/include/Zoltan_config.h:src/include/Zoltan_config.h.in) + +# Allow users to specify their own "install" command. If none is specified, +# the default is install-sh found in the config subdirectory. + +AC_ARG_WITH(install, + [AC_HELP_STRING([--with-install=INSTALL_PROGRAM], + [Use the installation program INSTALL_PROGRAM rather the default that is provided. For example --with-install="/path/install -p"])], + [ + INSTALL=$withval + INSTALL_PROGRAM=$withval + INSTALL_SCRIPT=$withval + INSTALL_DATA="$withval -m 644" + ],) + +# AM_MAINTAINER_MODE turns off maintainer-only makefile targets by +# default, and changes configure to understand a +# --enable-maintainer-mode option. --enable-maintainer-mode turns the +# maintainer-only targets back on. The maintainer-only makefile +# targets permit end users to clean automatically-generated files such +# as configure, which means they have to have autoconf and automake +# installed to repair the damage. AM_MAINTAINER_MODE makes it a bit +# harder for users to shoot themselves in the foot. 
+ +AM_MAINTAINER_MODE + +# Define $build, $host, $target, etc + +AC_CANONICAL_TARGET + +# Note in header file and Makefile conditional what the host OS is + +AM_CONDITIONAL(HOST_CYGWIN, false) +AM_CONDITIONAL(HOST_LINUX, false) +AM_CONDITIONAL(HOST_SOLARIS, false) + +case $host_os in + cygwin) + AM_CONDITIONAL(HOST_CYGWIN, true) + AC_DEFINE(HOST_CYGWIN,1,[software host will be cygwin]) + ;; + linux*) + AM_CONDITIONAL(HOST_LINUX, true) + AC_DEFINE(HOST_LINUX,1,[software host will be linux]) + ;; + solaris*) + AM_CONDITIONAL(HOST_SOLARIS, true) + AC_DEFINE(HOST_SOLARIS,1,[software host will be solaris]) + ;; +esac + +# Use automake + +# - Required version of automake. +AM_INIT_AUTOMAKE(1.9.6 no-define tar-ustar) + +# Specify required version of autoconf. + +AC_PREREQ(2.59) + +#TAC_ARG_ENABLE_OPTION(fortran, [enable Fortran support], FORTRAN_SUPPORT, no) +#This option is not currently available +TAC_ARG_ENABLE_OPTION(f90interface, [enable Fortran 90 interface (automatically enables Fortran support)], F90INTERFACE, no) +AM_CONDITIONAL(BUILD_ZOLTAN_F90_INTERFACE, [test "X$ac_cv_use_f90interface" != "Xno"]) +#AM_CONDITIONAL(USE_FORTRAN, [test "X$ac_cv_use_fortran" != "Xno"]) + +if test "X$ac_cv_use_f90interface" = "Xyes"; then + ac_cv_use_fortran=yes +else + ac_cv_use_fortran=no +fi + +if test "X$ac_cv_use_fortran" = "Xyes"; then + ac_cv_use_fortran90=yes +# AX_F90_MODULE_FLAG +# if test "X$ax_cv_f90_modflag" = "Xunknown" ; then +# AC_MSG_ERROR([unable to find f90 modules extension]) +# else +# FCFLAGS="$ax_cv_f90_modflag../ $ax_cv_f90_modflag. ${FCFLAGS}" +# fi +else + ac_cv_use_fortran90=no +fi + +TAC_ARG_ENABLE_FEATURE_SUB(zoltan, cppdriver, [Enable Zoltan's C++ driver], ZOLTAN_CPPDRIVER, yes) +AM_CONDITIONAL(HAVE_ZOLTAN_CPPDRIVER, [test "X$ac_cv_use_zoltan_cppdriver" != "Xno"]) + +#This can be removed after we retire the old build system +#AC_DEFINE([TRILINOS_CONFIG_H],,[Define when using the autotools to build Zoltan]) + +# ------------------------------------------------------------------------ +# Check to see if MPI enabled and if any special configuration done +# ------------------------------------------------------------------------ + +# We may want to handle this differently because Zoltan requires MPI +#TAC_ARG_CONFIG_MPI + +ZAC_ARG_CONFIG_MPI + +# #np# - can eliminate compiler checks below if your package does not use the +# language corresponding to the check. Please note that if you use +# F77_FUNC to determine Fortran name mangling, you should not remove +# the Fortran compiler check or the check for Fortran flags. Doing +# so will prevent the detection of the proper name mangling in some +# cases. + +AC_ARG_ENABLE(mpi-recv-limit, + AS_HELP_STRING([--enable-mpi-recv-limit],[Set to the limit on the number of simultaneous MPI posted receives, if any; default is --enable-mpi-recv-limit=10]), + [ AC_MSG_NOTICE(Building Zoltan to observe a limit of $enable_mpi_recv_limit simultaneously posted MPI receives) + AC_SUBST(MPI_RECV_LIMIT_FLAG, "-DMPI_RECV_LIMIT=$enable_mpi_recv_limit") + ] + ) + +# ------------------------------------------------------------------------ +# Checks for programs +# ------------------------------------------------------------------------ + +AM_PROG_CC_C_O(cc gcc) +#Conditional only? 
+#if test "X$ac_cv_use_zoltan_cppdriver" = "Xyes"; then +AC_PROG_CXX(CC g++ c++ cxx) +#fi + +if test "X$ac_cv_use_fortran90" = "Xyes"; then +AC_PROG_FC(FC gfortran f90 xlf90 f95) +fi + +if test "X$ac_cv_use_fortran" = "Xyes"; then +AC_PROG_FC(FC gfortran f90 xlf90 f95) +fi + +AC_PROG_RANLIB + +# Check if --with-flags present, prepend any specs to FLAGS + +TAC_ARG_WITH_FLAGS(ccflags, CCFLAGS) +#if test "X$ac_cv_use_zoltan_cppdriver" = "Xyes"; then +TAC_ARG_WITH_FLAGS(cxxflags, CXXFLAGS) +CXXFLAGS="-DMPICH_IGNORE_CXX_SEEK ${CXXFLAGS}" +#fi +TAC_ARG_WITH_FLAGS(cflags, CFLAGS) +TAC_ARG_WITH_FLAGS(fcflags, FCFLAGS) +#TAC_ARG_WITH_FLAGS(fflags, FFLAGS) +TAC_ARG_WITH_LIBS +TAC_ARG_WITH_FLAGS(ldflags, LDFLAGS) + +# ------------------------------------------------------------------------ +# Alternate archiver +# ------------------------------------------------------------------------ + +TAC_ARG_WITH_AR + +# ------------------------------------------------------------------------ +# MPI link check +# ------------------------------------------------------------------------ +TAC_ARG_CHECK_MPI + +# ------------------------------------------------------------------------ +# Checks for Makefile.export related systems +# ------------------------------------------------------------------------ +# Add this later +TAC_ARG_ENABLE_EXPORT_MAKEFILES(yes) + +# ------------------------------------------------------------------------ +# Checks for special package flags +# ------------------------------------------------------------------------ + +ZAC_ARG_WITH_ID() + +TAC_ARG_ENABLE_OPTION(gzip, [enable zlib support for driver], GZIP, no) +if test "X$ac_cv_use_gzip" != "Xno"; then + LIBS="-lz ${LIBS}" +fi +AM_CONDITIONAL(BUILD_GZIP, [test "X$ac_cv_use_gzip" != "Xno"]) + +TAC_ARG_WITH_PACKAGE(parmetis, [Enable Parmetis support.], PARMETIS, no) +AM_CONDITIONAL(BUILD_PARMETIS, [test "X$ac_cv_use_parmetis" != "Xno"]) +TAC_ARG_WITH_3PL_SUB(parmetis, libdir, [Specify where the Parmetis library is located. Ex. /path/to/library]) +if test "X$tac_with_parmetis_libdir" != "Xno"; then + LIBS="-L${tac_with_parmetis_libdir} -lparmetis -lmetis ${LIBS}" +fi + +TAC_ARG_WITH_3PL_SUB(parmetis, incdir, [Specify where the Parmetis header files are located. Ex. /path/to/headers]) +# It was necessary to move the parmetis include directories before the scotch +# include directories. +#if test "X$tac_with_parmetis_incdir" != "Xno"; then +# CPPFLAGS="-I${tac_with_parmetis_incdir} ${CPPFLAGS}" +#fi + + +TAC_ARG_WITH_PACKAGE(scotch, [Enable Scotch support.], SCOTCH, no) +AM_CONDITIONAL(BUILD_SCOTCH, [test "X$ac_cv_use_scotch" != "Xno"]) +TAC_ARG_WITH_3PL_SUB(scotch, libdir, [Specify where the Scotch library is located. Ex. /path/to/library]) +if test "X$tac_with_scotch_libdir" != "Xno"; then + LIBS="-L${tac_with_scotch_libdir} -lptscotch -lptscotcherr -lptscotcherrexit -lscotch -lscotcherr -lscotcherrexit ${LIBS}" +fi +TAC_ARG_WITH_3PL_SUB(scotch, incdir, [Specify where the Scotch header files are located. Ex. /path/to/headers]) +if test "X$tac_with_scotch_incdir" != "Xno"; then + CPPFLAGS="-I${tac_with_scotch_incdir} ${CPPFLAGS}" +fi +# The parmetis include directories need to be before the Scotch include +# directories. +if test "X$tac_with_parmetis_incdir" != "Xno"; then + CPPFLAGS="-I${tac_with_parmetis_incdir} ${CPPFLAGS}" +fi + +TAC_ARG_WITH_PACKAGE(patoh, [Enable Patoh support.], PATOH, no) +TAC_ARG_WITH_3PL_SUB(patoh, libdir, [Specify where the Patoh library is located. Ex. 
/path/to/library]) +if test "X$tac_with_patoh_libdir" != "Xno"; then + LIBS="-L${tac_with_patoh_libdir} -lpatoh ${LIBS}" +fi +TAC_ARG_WITH_3PL_SUB(patoh, incdir, [Specify where the Patoh header files are located. Ex. /path/to/headers]) +if test "X$tac_with_patoh_incdir" != "Xno"; then + CPPFLAGS="-I${tac_with_patoh_incdir} ${CPPFLAGS}" +fi + +#TAC_ARG_WITH_PACKAGE(drum, [Enable Drum support. Library name as well as library and include paths must be specified using LDFLAGS and CPPFLAGS.], DRUM, no) +#TAC_ARG_WITH_3PL_SUB(drum, libdir, [Specify where the Drum library is located. Ex. /path/to/library]) +#if test "X$tac_with_drum_libdir" != "Xno"; then +# LIBS="-L${tac_with_drum_libdir} ${LIBS}" +#fi +#TAC_ARG_WITH_3PL_SUB(drum, incdir, [Specify where the Drum header files are located. Ex. /path/to/headers]) +#if test "X$tac_with_drum_incdir" != "Xno"; then +# CPPFLAGS="-I${tac_with_drum_incdir} ${CPPFLAGS}" +#fi + +TAC_ARG_WITH_PACKAGE(nemesis_exodus, [Enable Nemesis/Exodus support for the Zdrive test executable. Library and include paths must be specified using LDFLAGS and CFLAGS.], NEMESIS_EXODUS, no) +#I am not sure if we can do this. We may not be able to get things in the right +#order. JW. +if test "X$ac_cv_use_nemesis_exodus" != "Xno"; then + LIBS="-lnemIc -lexoIIv2c -lnetcdf ${LIBS}" +fi + +LIBS="${LIBS} -lm" + +#TAC_ARG_WITH_PACKAGE(parkway, [Enable Parkway support.], PARKWAY, no) +#TAC_ARG_WITH_3PL_SUB(parkway, libdir, [Specify where the Parkway library is located. Ex. /path/to/library]) +#if test "X$tac_with_parkway_libdir" != "Xno"; then +# LIBS="-L${tac_with_parkway_libdir} -lparkway -lpmpich++ -lstdc++ ${LIBS}" +#fi +#TAC_ARG_WITH_3PL_SUB(parkway, incdir, [Specify where the Parkway header files are located. Ex. /path/to/headers]) +#if test "X$tac_with_parkway_incdir" != "Xno"; then +# CPPFLAGS="-I${tac_with_parkway_incdir} ${CPPFLAGS}" +#fi + +# ------------------------------------------------------------------------ +# Checks if tests and examples should be built +# ------------------------------------------------------------------------ + + +TAC_ARG_ENABLE_FEATURE(tests, [Make tests for all Trilinos packages buildable with 'make tests'], TESTS, yes) +TAC_ARG_ENABLE_FEATURE_SUB_CHECK( zoltan, tests, [Make Zoltan tests buildable with 'make tests'], ZOLTAN_TESTS) +AM_CONDITIONAL(BUILD_TESTS, test "X$ac_cv_use_zoltan_tests" != "Xno") + +TAC_ARG_ENABLE_FEATURE(examples, [Make examples for all Trilinos packages buildable with 'make examples'], EXAMPLES, yes) +TAC_ARG_ENABLE_FEATURE_SUB_CHECK( zoltan, examples, [Make Zoltan examples buildable with 'make examples'], ZOLTAN_EXAMPLES) +AM_CONDITIONAL(BUILD_EXAMPLES, test "X$ac_cv_use_zoltan_examples" != "Xno") + +#We now build tests and examples through separate make targets, rather than +#during "make". We still need to conditionally include the test and example +#in SUBDIRS, even though SUB_TEST and SUB_EXAMPLE will never be +#defined, so that the tests and examples are included in the distribution +#tarball. +AM_CONDITIONAL(SUB_TEST, test "X$ac_cv_use_sub_test" = "Xyes") +AM_CONDITIONAL(SUB_EXAMPLE, test "X$ac_cv_use_sub_example" = "Xyes") + +#TAC_ARG_ENABLE_FEATURE(libcheck, [Check for some third-party libraries. (Cannot be disabled unless tests and examples are also disabled.)], LIBCHECK, yes) + +# ------------------------------------------------------------------------ +# Specify other directories +# ------------------------------------------------------------------------ + +# enable use of --with-libdirs="-Llibdir1 -Llibdir2 ..." 
to prepend to LDFLAGS +TAC_ARG_WITH_LIBDIRS +# enable use of --with-incdirs="-Iincdir1 -Iincdir2 ..." to prepend to CPPFLAGS +TAC_ARG_WITH_INCDIRS + +# ------------------------------------------------------------------------ +# Checks for libraries +# ------------------------------------------------------------------------ + +ax_cv_f90_modulecase="lower" +FC_VENDOR="none" + +# Define F77_FUNC that will be used to link with Fortran subroutines. +if test "X$ac_cv_use_fortran" != "Xno"; then + AC_FC_WRAPPERS + AC_FC_SRCEXT(f) + AC_FC_SRCEXT(f90) + # Determine compile-line flag for F90 modules (e.g., -M). + AX_F90_MODULE_FLAG + if test "X$ax_cv_f90_modflag" = "Xunknown" ; then + AC_MSG_ERROR([unable to find f90 modules extension]) + else + FCFLAGS="$ax_cv_f90_modflag../ $ax_cv_f90_modflag. ${FCFLAGS}" + fi + # Determine case (upper or lower) of F90 module files. + AX_F90_MODULE_CASE + echo "KDDKDD " $ax_cv_f90_modulecase + + WK_FC_GET_VENDOR() + echo "KDDKDD " $FC_VENDOR + if test "$FC_VENDOR" = "PGI"; then + FCFLAGS="-DPGI ${FCFLAGS}" + CFLAGS="-DPGI ${CFLAGS}" + fi + if test "$FC_VENDOR" = "Lahey"; then + FCFLAGS="-DFUJITSU ${FCFLAGS}" + CFLAGS="-DFUJITSU ${CFLAGS}" + fi +fi +AM_CONDITIONAL(F90_MODULE_LOWERCASE, [test "X$ax_cv_f90_modulecase" != "Xupper"]) +AM_CONDITIONAL(NAG_F90_COMPILER, [test "X$FC_VENDOR" == "XNAG"]) + +# If tests, examples and libcheck are disabled, we don't have to check +# for these libraries. + +#if test "X$ac_cv_use_new_package_examples" != "Xno" || test "X$ac_cv_use_libcheck" != "Xno"; then +#if test "X$ac_cv_use_zoltan_tests" != "Xno" || test "X$ac_cv_use_zoltan_examples" != "Xno" || test "X$ac_cv_use_libcheck" != "Xno"; then +# checks for libraries now +dnl Replace `main' with a function in -lm: +#AC_SEARCH_LIBS(pow,[m],,AC_MSG_ERROR(Cannot find math library)) +#AC_SEARCH_LIBS(sqrt,[m],,AC_MSG_ERROR(Cannot find math library)) +#fi +# end of the list of libraries that don't need to be checked for if +# tests and examples are disabled. 
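+
+# For reference, an illustrative out-of-source build using some of the
+# options defined above (all paths below are placeholders) might be
+# configured as:
+#
+#   mkdir BUILD_DIR; cd BUILD_DIR
+#   ../configure --enable-f90interface \
+#     --with-parmetis \
+#     --with-parmetis-libdir=/path/to/parmetis/lib \
+#     --with-parmetis-incdir=/path/to/parmetis/include \
+#     --with-scotch \
+#     --with-scotch-libdir=/path/to/scotch/lib \
+#     --with-scotch-incdir=/path/to/scotch/include
+#   make; make install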
+ +# ------------------------------------------------------------------------ +# Checks for linker characteristics +# ------------------------------------------------------------------------ + +# Determine libraries needed for linking with Fortran +#AC_FC_LIBRARY_LDFLAGS +if test "X$ac_cv_use_fortran" = "Xyes"; then +AC_FC_LIBRARY_LDFLAGS +fi + +# ------------------------------------------------------------------------ +# Zoltan may be built via autotools, cmake, or our native makefile +# ------------------------------------------------------------------------ + +CPPFLAGS="-DAUTOTOOLS_BUILD ${CPPFLAGS}" + +# ------------------------------------------------------------------------ +# Perform substitutions in output files +# ------------------------------------------------------------------------ + +AC_SUBST(ac_aux_dir) + +# ------------------------------------------------------------------------ +# Output files +# ------------------------------------------------------------------------ +# +AC_CONFIG_FILES([ + Makefile + src/Makefile + src/driver/Makefile + src/fdriver/Makefile + Makefile.export.zoltan + example/Makefile + example/C/Makefile + example/CPP/Makefile + siMPI/Makefile + siMPI/pyMPI/Makefile + siMPI/pyMPI/siMPI/Makefile + ]) +# test/Large_Data/Makefile +# example/lib/Makefile +# example/C/Makefile +# example/C/sparse_matrix/Makefile +# example/CPP/Makefile +# Utilities/Makefile + +AC_OUTPUT() + +echo "---------------------------------------------" +echo "Finished Running Zoltan Configure Script" +echo "---------------------------------------------" + diff --git a/Zoltan-3.90/doc/NEA_docs/developer_html/dev_hybrid.html b/Zoltan-3.90/doc/NEA_docs/developer_html/dev_hybrid.html new file mode 100644 index 00000000..121993a9 --- /dev/null +++ b/Zoltan-3.90/doc/NEA_docs/developer_html/dev_hybrid.html @@ -0,0 +1,515 @@ + + + + + + + + + Zoltan Developer's Guide: Hybrid Partitioning + + + +

+Appendix: Hybrid Partitioning

+Hybrid partitioning is an amalgam of Zoltan's native parallel hypergraph
+partitioner (PHG) and its Recursive Coordinate
+Bisection algorithm (RCB). Hybrid partitioning can
+be useful when a user wants to strike a happy medium between efficiency
+and fidelity. Traditional Zoltan-PHG is well suited to minimizing
+the number of cut hyperedges in the system, but it is comparatively slow due
+to the multiple layers of coarsening it goes through and the standard matching
+methods used to calculate the vertices of each coarser hypergraph.
+
+

+Hypergraph partitioning is a useful partitioning and
+load balancing method when connectivity data is available. It can be
+viewed as a more sophisticated alternative to
+traditional graph partitioning.
+

A hypergraph consists of vertices and hyperedges. A hyperedge
+connects
+one or more vertices. A graph is a special case of a hypergraph where
+each edge has size two (two vertices). The hypergraph model is well
+suited to parallel computing, where vertices correspond to data objects
+and hyperedges represent the communication requirements. The basic
+partitioning problem is to partition the vertices into k
+approximately equal sets such that the number of cut hyperedges is
+minimized. Most partitioners (including Zoltan-PHG) allow a more
+general
+model where both vertices and hyperedges can be assigned weights.
+It has been
+shown that the hypergraph model gives a more accurate representation
+of communication cost (volume) than the graph model. In particular,
+for sparse matrix-vector multiplication, the hypergraph model
+exactly represents communication volume. Sparse
+matrices can be partitioned either along rows or columns;
+in the row-net model the columns are vertices and each row corresponds
+to a hyperedge, while in the column-net model the roles of vertices
+and hyperedges are reversed.
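+As a small illustration, consider a 3 x 4 sparse matrix with nonzeros in
+positions (r0,c0), (r0,c2), (r1,c1), (r1,c3), (r2,c0) and (r2,c3). In the
+row-net model the vertices are the columns c0, c1, c2, c3, and each row
+becomes a hyperedge: e0 = {c0, c2}, e1 = {c1, c3}, e2 = {c0, c3}. If c0 and
+c3 are assigned to different parts, hyperedge e2 is cut, which corresponds
+exactly to the communication needed to accumulate row r2's partial products
+in a matrix-vector multiplication.
+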

+

Zoltan contains a native parallel hypergraph partitioner, called PHG +(Parallel HyperGraph partitioner). In addition, Zoltan provides +access to PaToH, +a serial hypergraph partitioner. +Note that PaToH is not part of Zoltan and should be obtained +separately from the +PaToH web site. +Zoltan-PHG is a fully parallel multilevel hypergraph partitioner. For +further technical description, see [Devine et al, 2006].
+

+

Algorithm:

+The algorithm used is multilevel hypergraph partitioning. For
+coarsening, several versions of inner product (heavy connectivity)
+matching are available.
+The refinement is based on Fiduccia-Mattheyses (FM), but in parallel it
+is only an approximation.
+
+

Parallel implementation:

+A novel feature of our parallel implementation is that we use a 2D
+distribution of the hypergraph. That is, each processor owns partial
+data about some vertices and some hyperedges. The processors are
+logically organized in a 2D grid as well. Most communication is limited
+to either a processor row or column. This design should allow for
+good scalability on large numbers of processors.
+ +

Data structures:

+The hypergraph is the most important data structure. This is stored as +a compressed sparse matrix. Note that in parallel, each processor owns +a local part of the global hypergraph +(a submatrix of the whole matrix). +The hypergraph data type is struct HGraph, and contains +information like number of vertices, hyperedges, pins, compressed +storage of all pins, optional vertex and edge weights, pointers +to relevant communicators, and more. One cryptic notation needs an +explanation: The arrays hindex, hvertex are used to +look up vertex info given a hyperedge, and vindex, vedge are +used to look up hyperedge info given a vertex. Essentially, +we store the hypergraph as a sparse matrix in both CSR and CSC formats. +This doubles the memory cost but gives better performance. +The data on each processor is stored using local indexing, starting at zero. +In order to get the global vertex or edge number, use the macros +VTX_LNO_TO_GNO and EDGE_LNO_TO_GNO. These macros will +look up the correct offsets (using the dist_x and dist_y arrays). +Note that phg->nVtx is always the local number of vertices, +which may be zero on some processors. + +
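+The simplified sketch below is illustrative only (the real struct HGraph
+has many more members; consult the source for details). It shows how the
+two compressed views are used: hindex/hvertex answer "which vertices does
+hyperedge e contain?", while vindex/vedge answer "which hyperedges contain
+vertex v?".
+
+  /* Simplified sketch of the dual CSR/CSC pin storage described above. */
+  typedef struct {
+    int nVtx, nEdge, nPins;
+    int *hindex, *hvertex;   /* hyperedge -> vertices (CSR-like view) */
+    int *vindex, *vedge;     /* vertex -> hyperedges (CSC-like view)  */
+  } SimpleHG;
+
+  /* Visit all local vertices pinned by local hyperedge e. */
+  void visit_edge_pins(const SimpleHG *hg, int e, void (*visit)(int vtx))
+  {
+    int j;
+    for (j = hg->hindex[e]; j < hg->hindex[e + 1]; j++)
+      visit(hg->hvertex[j]);
+  }
+
+  /* Visit all local hyperedges containing local vertex v. */
+  void visit_vertex_edges(const SimpleHG *hg, int v, void (*visit)(int edge))
+  {
+    int j;
+    for (j = hg->vindex[v]; j < hg->vindex[v + 1]; j++)
+      visit(hg->vedge[j]);
+  }
+
+The local indices e and v above can be converted to global numbers with the
+VTX_LNO_TO_GNO and EDGE_LNO_TO_GNO macros mentioned earlier.
+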

Parameters:

+In the User's Guide, only the most essential parameters have been +documented. There are several other parameters, intended for developers +and perhaps expert "power" users. We give a more complete list of all +parameters below. Note that these parameters may change in future versions!
+
+For a precise list of parameters in a particular version of Zoltan, look at the source code (phg.c). + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Method String:HYPERGRAPH
Parameters:
+
    HYPERGRAPH_PACKAGE
+
PHG (parallel) or PaToH (serial)
+
   CHECK_HYPERGRAPH
+
Check if input data is valid.
+(Slows performance; intended for debugging.)
+
    +PHG_OUTPUT_LEVEL
+
Level of verbosity; 0 is silent.
+
    PHG_FINAL_OUTPUT
+
Print stats about final +partition? (0/1)
+
    PHG_NPROC_VERTEX
+
Desired number of processes in +the vertex direction (for 2D internal layout)
    PHG_NPROC_HEDGE
+
Desired number of processes in +the hyperedge direction (for 2D internal layout)
    PHG_COARSENING_METHODThe method to use in matching/coarsening; currently these are +available. 
+ agg - agglomerative inner product +matching (a.k.a. heavy connectivity matching)
+ ipm - inner product +matching (a.k.a. heavy connectivity matching)
+ c-ipm -  column +ipm;  faster method based on ipm within processor columns
+ a-ipm - alternate +between fast method (l-ipm ) and ipm
+ l-ipm -  local ipm +on each processor. Fastest option  but often gives poor quality.
+ h-ipm - hybrid ipm that  uses partial c-ipm followed +by ipm on each level
+
+
    PHG_COARSENING_LIMIT
+
Number of vertices at which to stop coarsening.
+
    PHG_VERTEX_VISIT_ORDER
+
Ordering of vertices in greedy +matching scheme:
+0 - random
+1 - natural order (as given by the query functions)
+2 - increasing vertex weights
+3 - increasing vertex degree
+4 - increasing vertex degree, weighted by pins
+
    PHG_EDGE_SCALING
+
Scale edge weights by some +function of size of the hyperedges:
+0 - no scaling
+1 - scale by 1/(size-1)     [absorption scaling]
+2 - scale by 2/((size*size-1)) [clique scaling]
+
    PHG_VERTEX_SCALING
+
Variations in "inner product" +similarity metric (for matching):
+0 - Euclidean inner product: <x,y>
+1 - cosine similarity: <x,y>/(|x|*|y|)
+2 - <x,y>/(|x|^2 * |y|^2)
+3 - scale by sqrt of vertex weights
+4 - scale by vertex weights
+
    PHG_COARSEPARTITION_METHODMethod to partition the coarsest (smallest) hypergraph; +typically done in serial:
+ random - random
+ linear - linear +(natural) order
+ greedy - greedy method +based on minimizing cuts
+ auto - automatically +select from the above methods (in parallel, the processes will do +different methods)
+
    PHG_REFINEMENT_METHOD
+
Refinement algorithm:
fm - two-way +approximate  FM
+ none - no refinement
+
    PHG_REFINEMENT_LOOP_LIMITLoop limit in FM refinement. Higher number means more +refinement.
+
    PHG_REFINEMENT_MAX_NEG_MOVE
+
Maximum number of negative moves allowed in FM.
+
   PHG_BAL_TOL_ADJUSTMENT
+
Controls how the balance tolerance is adjusted at +each level of bisection.
+
  PHG_RANDOMIZE_INPUT
+
Randomize layout of vertices and +hyperedges in internal parallel 2D layout? (0/1)
+
  PHG_EDGE_WEIGHT_OPERATION + Operation to be applied to edge +weights supplied by different processes for the same hyperedge:
+ add - the hyperedge weight will be the sum of the supplied +weights
+ max - the hyperedge weight will be the maximum of the +supplied weights
+ error - if the hyperedge weights are not equal, Zoltan +will flag an error, otherwise the hyperedge weight will be the value +returned by the processes
+
   EDGE_SIZE_THRESHOLD
+
Ignore hyperedges greater than this fraction times +number of vertices.
+
   PATOH_ALLOC_POOL0
+
Memory allocation for PaToH; see +the PaToH manual for details.
+
   PATOH_ALLOC_POOL1
+
Memory allocation for PaToH; see +the PaToH manual for details.
Default values:
+

+
HYPERGRAPH_PACKAGE = PHG
+

+
CHECK_HYPERGRAPH += 0
+

+
PHG_OUTPUT_LEVEL=0

+
PHG_FINAL_OUTPUT=0

+
PHG_REDUCTION_METHOD=ipm

+
PHG_REDUCTION_LIMIT=100

+
PHG_VERTEX_VISIT_ORDER=0

+
PHG_EDGE_SCALING=0

+
PHG_VERTEX_SCALING=0

+
PHG_COARSEPARTITION_METHOD=greedy

+
PHG_REFINEMENT_METHOD=fm

+
PHG_REFINEMENT_LOOP_LIMIT=10

+
PHG_REFINEMENT_MAX_NEG_MOVE=100

+
PHG_BAL_TOL_ADJUSTMENT=0.7

+
PHG_RANDOMIZE_INPUT=0

+
PHG_EDGE_WEIGHT_OPERATION=max

+
EDGE_SIZE_THRESHOLD=0.25

+
PATOH_ALLOC_POOL0=0

+
PATOH_ALLOC_POOL1=0
Required Query Functions:
+

+
ZOLTAN_NUM_OBJ_FN

+
ZOLTAN_OBJ_LIST_FN +or ZOLTAN_FIRST_OBJ_FN/ZOLTAN_NEXT_OBJ_FN +pair

+
ZOLTAN_HG_SIZE_CS_FN +
+ ZOLTAN_HG_CS_FN +
Optional Query Functions:
+

+
ZOLTAN_HG_SIZE_EDGE_WTS_FN

+
ZOLTAN_HG_EDGE_WTS_FN
+

+It is possible to provide the graph query functions instead of the +hypergraph queries, though this is not recommended. If only graph query +functions are registered, Zoltan will automatically create a hypergraph +from the graph, but some information (specifically, edge weights) will +be lost.
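+As a usage sketch (the function name below is hypothetical and error
+checking is omitted), the parameters listed above are set through Zoltan's
+generic parameter interface before partitioning:
+
+  #include <mpi.h>
+  #include "zoltan.h"
+
+  /* Create a Zoltan object and select the PHG hypergraph partitioner
+   * with a few of the parameters listed above. The required query
+   * functions (ZOLTAN_NUM_OBJ_FN, ZOLTAN_OBJ_LIST_FN,
+   * ZOLTAN_HG_SIZE_CS_FN, ZOLTAN_HG_CS_FN) must still be registered
+   * before calling Zoltan_LB_Partition().                             */
+  static struct Zoltan_Struct *setup_phg(MPI_Comm comm)
+  {
+    struct Zoltan_Struct *zz = Zoltan_Create(comm);
+
+    Zoltan_Set_Param(zz, "LB_METHOD", "HYPERGRAPH");
+    Zoltan_Set_Param(zz, "HYPERGRAPH_PACKAGE", "PHG");
+    Zoltan_Set_Param(zz, "PHG_COARSENING_METHOD", "agg");
+    Zoltan_Set_Param(zz, "PHG_REFINEMENT_METHOD", "fm");
+    Zoltan_Set_Param(zz, "PHG_OUTPUT_LEVEL", "1");
+
+    return zz;
+  }
+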

+
[Table of Contents  | Next:  +Refinement Tree Partitioning(NEANEA CHANGE ME)  |  Previous:  +ParMetis(NEANEA CHANGE ME)  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/NEA_docs/writeup/128_breakdown_percent.pdf b/Zoltan-3.90/doc/NEA_docs/writeup/128_breakdown_percent.pdf new file mode 100644 index 00000000..43e06ffb Binary files /dev/null and b/Zoltan-3.90/doc/NEA_docs/writeup/128_breakdown_percent.pdf differ diff --git a/Zoltan-3.90/doc/NEA_docs/writeup/128_cutl.pdf b/Zoltan-3.90/doc/NEA_docs/writeup/128_cutl.pdf new file mode 100644 index 00000000..6f315d35 Binary files /dev/null and b/Zoltan-3.90/doc/NEA_docs/writeup/128_cutl.pdf differ diff --git a/Zoltan-3.90/doc/NEA_docs/writeup/128_time.pdf b/Zoltan-3.90/doc/NEA_docs/writeup/128_time.pdf new file mode 100644 index 00000000..ba14c5fa Binary files /dev/null and b/Zoltan-3.90/doc/NEA_docs/writeup/128_time.pdf differ diff --git a/Zoltan-3.90/doc/NEA_docs/writeup/2_breakdown_percent.pdf b/Zoltan-3.90/doc/NEA_docs/writeup/2_breakdown_percent.pdf new file mode 100644 index 00000000..163b12c6 Binary files /dev/null and b/Zoltan-3.90/doc/NEA_docs/writeup/2_breakdown_percent.pdf differ diff --git a/Zoltan-3.90/doc/NEA_docs/writeup/2_cutl.pdf b/Zoltan-3.90/doc/NEA_docs/writeup/2_cutl.pdf new file mode 100644 index 00000000..77a55467 Binary files /dev/null and b/Zoltan-3.90/doc/NEA_docs/writeup/2_cutl.pdf differ diff --git a/Zoltan-3.90/doc/NEA_docs/writeup/2_time.pdf b/Zoltan-3.90/doc/NEA_docs/writeup/2_time.pdf new file mode 100644 index 00000000..df6644c6 Binary files /dev/null and b/Zoltan-3.90/doc/NEA_docs/writeup/2_time.pdf differ diff --git a/Zoltan-3.90/doc/NEA_docs/writeup/hybrid_current.aux b/Zoltan-3.90/doc/NEA_docs/writeup/hybrid_current.aux new file mode 100644 index 00000000..c9331876 --- /dev/null +++ b/Zoltan-3.90/doc/NEA_docs/writeup/hybrid_current.aux @@ -0,0 +1,38 @@ +\relax +\ifx\hyper@anchor\@undefined +\global \let \oldcontentsline\contentsline +\gdef \contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}} +\global \let \oldnewlabel\newlabel +\gdef \newlabel#1#2{\newlabelxx{#1}#2} +\gdef \newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}} +\AtEndDocument{\let \contentsline\oldcontentsline +\let \newlabel\oldnewlabel} +\else +\global \let \hyper@last\relax +\fi + +\@writefile{toc}{\contentsline {section}{\numberline {1}Introduction}{1}{section.1}} +\@writefile{toc}{\contentsline {section}{\numberline {2}Parallel hypergraphs and geometric input}{1}{section.2}} +\@writefile{toc}{\contentsline {section}{\numberline {3}PHG, MPI and 2-dimensional representation}{2}{section.3}} +\@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces Before communication}}{2}{table.1}} +\newlabel{tab:0/tc}{{1}{2}{\label {tab:0/tc} Before communication\relax }{table.1}{}} +\@writefile{lot}{\contentsline {table}{\numberline {2}{\ignorespaces After communication}}{2}{table.2}} +\newlabel{tab:1/tc}{{2}{2}{\label {tab:1/tc} After communication\relax }{table.2}{}} +\@writefile{toc}{\contentsline {section}{\numberline {4}Matching}{3}{section.4}} +\@writefile{toc}{\contentsline {section}{\numberline {5}Reduction factor}{3}{section.5}} +\citation{Catalyurek} +\@writefile{toc}{\contentsline {section}{\numberline {6}Results}{4}{section.6}} +\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Runtimes on 128 processors}}{4}{figure.1}} +\newlabel{fig:Times_np_128}{{1}{4}{Runtimes on 128 processors\relax }{figure.1}{}} +\bibcite{Catalyurek}{1} +\@writefile{toc}{\contentsline {section}{\numberline {7}Conclusion and 
discussion}{5}{section.7}} +\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Cuts on 128 processors}}{6}{figure.2}} +\newlabel{fig:Cuts_np_128}{{2}{6}{Cuts on 128 processors\relax }{figure.2}{}} +\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Timing by percentage on 128 processors (UL, Shockstem 3D; UR, Shockstem 3D -- 108; LL, RPI; LR, Slac1.5}}{6}{figure.3}} +\newlabel{fig:Percent_np_128}{{3}{6}{Timing by percentage on 128 processors (UL, Shockstem 3D; UR, Shockstem 3D -- 108; LL, RPI; LR, Slac1.5\relax }{figure.3}{}} +\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces Runtimes in serial on 2 processors}}{7}{figure.4}} +\newlabel{fig:Times_np_2}{{4}{7}{Runtimes in serial on 2 processors\relax }{figure.4}{}} +\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces Cuts in serial on 2 processors}}{7}{figure.5}} +\newlabel{fig:Cuts_np_2}{{5}{7}{Cuts in serial on 2 processors\relax }{figure.5}{}} +\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces Timing by percentage on 2 processors (UL, Shockstem 3D; UR, Shockstem 3D -- 108; LL, RPI; LR, Slac1.5}}{9}{figure.6}} +\newlabel{fig:Percent_np_2}{{6}{9}{Timing by percentage on 2 processors (UL, Shockstem 3D; UR, Shockstem 3D -- 108; LL, RPI; LR, Slac1.5\relax }{figure.6}{}} diff --git a/Zoltan-3.90/doc/NEA_docs/writeup/hybrid_current.log b/Zoltan-3.90/doc/NEA_docs/writeup/hybrid_current.log new file mode 100644 index 00000000..b6dc307e --- /dev/null +++ b/Zoltan-3.90/doc/NEA_docs/writeup/hybrid_current.log @@ -0,0 +1,336 @@ +This is pdfTeXk, Version 3.141592-1.40.3 (Web2C 7.5.6) (format=pdflatex 2011.6.3) 18 AUG 2011 13:37 +entering extended mode + %&-line parsing enabled. +**hybrid_current.tex +(./hybrid_current.tex +LaTeX2e <2005/12/01> +Babel and hyphenation patterns for english, usenglishmax, dumylang, noh +yphenation, arabic, basque, bulgarian, coptic, welsh, czech, slovak, german, ng +erman, danish, esperanto, spanish, catalan, galician, estonian, farsi, finnish, + french, greek, monogreek, ancientgreek, croatian, hungarian, interlingua, ibyc +us, indonesian, icelandic, italian, latin, mongolian, dutch, norsk, polish, por +tuguese, pinyin, romanian, russian, slovenian, uppersorbian, serbian, swedish, +turkish, ukenglish, ukrainian, loaded. +(/usr/share/texmf/tex/latex/base/article.cls +Document Class: article 2005/09/16 v1.4f Standard LaTeX document class +(/usr/share/texmf/tex/latex/base/size12.clo +File: size12.clo 2005/09/16 v1.4f Standard LaTeX file (size option) +) +\c@part=\count79 +\c@section=\count80 +\c@subsection=\count81 +\c@subsubsection=\count82 +\c@paragraph=\count83 +\c@subparagraph=\count84 +\c@figure=\count85 +\c@table=\count86 +\abovecaptionskip=\skip41 +\belowcaptionskip=\skip42 +\bibindent=\dimen102 +) +(/usr/share/texmf/tex/latex/amsmath/amsmath.sty +Package: amsmath 2000/07/18 v2.13 AMS math features +\@mathmargin=\skip43 + +For additional information on amsmath, use the `?' option. +(/usr/share/texmf/tex/latex/amsmath/amstext.sty +Package: amstext 2000/06/29 v2.01 + +(/usr/share/texmf/tex/latex/amsmath/amsgen.sty +File: amsgen.sty 1999/11/30 v2.0 +\@emptytoks=\toks14 +\ex@=\dimen103 +)) +(/usr/share/texmf/tex/latex/amsmath/amsbsy.sty +Package: amsbsy 1999/11/29 v1.2d +\pmbraise@=\dimen104 +) +(/usr/share/texmf/tex/latex/amsmath/amsopn.sty +Package: amsopn 1999/12/14 v2.01 operator names +) +\inf@bad=\count87 +LaTeX Info: Redefining \frac on input line 211. 
+\uproot@=\count88 +\leftroot@=\count89 +LaTeX Info: Redefining \overline on input line 307. +\classnum@=\count90 +\DOTSCASE@=\count91 +LaTeX Info: Redefining \ldots on input line 379. +LaTeX Info: Redefining \dots on input line 382. +LaTeX Info: Redefining \cdots on input line 467. +\Mathstrutbox@=\box26 +\strutbox@=\box27 +\big@size=\dimen105 +LaTeX Font Info: Redeclaring font encoding OML on input line 567. +LaTeX Font Info: Redeclaring font encoding OMS on input line 568. +\macc@depth=\count92 +\c@MaxMatrixCols=\count93 +\dotsspace@=\muskip10 +\c@parentequation=\count94 +\dspbrk@lvl=\count95 +\tag@help=\toks15 +\row@=\count96 +\column@=\count97 +\maxfields@=\count98 +\andhelp@=\toks16 +\eqnshift@=\dimen106 +\alignsep@=\dimen107 +\tagshift@=\dimen108 +\tagwidth@=\dimen109 +\totwidth@=\dimen110 +\lineht@=\dimen111 +\@envbody=\toks17 +\multlinegap=\skip44 +\multlinetaggap=\skip45 +\mathdisplay@stack=\toks18 +LaTeX Info: Redefining \[ on input line 2666. +LaTeX Info: Redefining \] on input line 2667. +) +(/usr/share/texmf/tex/latex/graphics/graphicx.sty +Package: graphicx 1999/02/16 v1.0f Enhanced LaTeX Graphics (DPC,SPQR) + +(/usr/share/texmf/tex/latex/graphics/keyval.sty +Package: keyval 1999/03/16 v1.13 key=value parser (DPC) +\KV@toks@=\toks19 +) +(/usr/share/texmf/tex/latex/graphics/graphics.sty +Package: graphics 2006/02/20 v1.0o Standard LaTeX Graphics (DPC,SPQR) + +(/usr/share/texmf/tex/latex/graphics/trig.sty +Package: trig 1999/03/16 v1.09 sin cos tan (DPC) +) +(/usr/share/texmf/tex/latex/config/graphics.cfg +File: graphics.cfg 2007/01/18 v1.5 graphics configuration of teTeX/TeXLive +) +Package graphics Info: Driver file: pdftex.def on input line 90. + +(/usr/share/texmf/tex/latex/pdftex-def/pdftex.def +File: pdftex.def 2007/01/08 v0.04d Graphics/color for pdfTeX +\Gread@gobject=\count99 +)) +\Gin@req@height=\dimen112 +\Gin@req@width=\dimen113 +) +(/usr/share/texmf/tex/latex/tools/verbatim.sty +Package: verbatim 2003/08/22 v1.5q LaTeX2e package for verbatim enhancements +\every@verbatim=\toks20 +\verbatim@line=\toks21 +\verbatim@in@stream=\read1 +) +(/usr/share/texmf/tex/latex/graphics/color.sty +Package: color 2005/11/14 v1.0j Standard LaTeX Color (DPC) + +(/usr/share/texmf/tex/latex/config/color.cfg +File: color.cfg 2007/01/18 v1.5 color configuration of teTeX/TeXLive +) +Package color Info: Driver file: pdftex.def on input line 130. 
+) +(/usr/share/texmf/tex/latex/subfigure/subfigure.sty +Package: subfigure 2002/03/15 v2.1.5 subfigure package +\subfigtopskip=\skip46 +\subfigcapskip=\skip47 +\subfigcaptopadj=\dimen114 +\subfigbottomskip=\skip48 +\subfigcapmargin=\dimen115 +\subfiglabelskip=\skip49 +\c@subfigure=\count100 +\c@lofdepth=\count101 +\c@subtable=\count102 +\c@lotdepth=\count103 + +**************************************** +* Local config file subfigure.cfg used * +**************************************** +(/usr/share/texmf/tex/latex/subfigure/subfigure.cfg) +\subfig@top=\skip50 +\subfig@bottom=\skip51 +) +(/usr/share/texmf/tex/latex/hyperref/hyperref.sty +Package: hyperref 2007/02/07 v6.75r Hypertext links for LaTeX +\@linkdim=\dimen116 +\Hy@linkcounter=\count104 +\Hy@pagecounter=\count105 + +(/usr/share/texmf/tex/latex/hyperref/pd1enc.def +File: pd1enc.def 2007/02/07 v6.75r Hyperref: PDFDocEncoding definition (HO) +) +(/usr/share/texmf/tex/latex/config/hyperref.cfg +File: hyperref.cfg 2002/06/06 v1.2 hyperref configuration of TeXLive +) +(/usr/share/texmf/tex/latex/oberdiek/kvoptions.sty +Package: kvoptions 2006/08/22 v2.4 Connects package keyval with LaTeX options ( +HO) +) +Package hyperref Info: Hyper figures OFF on input line 2288. +Package hyperref Info: Link nesting OFF on input line 2293. +Package hyperref Info: Hyper index ON on input line 2296. +Package hyperref Info: Plain pages OFF on input line 2303. +Package hyperref Info: Backreferencing OFF on input line 2308. + +Implicit mode ON; LaTeX internals redefined +Package hyperref Info: Bookmarks ON on input line 2444. +(/usr/share/texmf/tex/latex/ltxmisc/url.sty +\Urlmuskip=\muskip11 +Package: url 2005/06/27 ver 3.2 Verb mode for urls, etc. +) +LaTeX Info: Redefining \url on input line 2599. +\Fld@menulength=\count106 +\Field@Width=\dimen117 +\Fld@charsize=\dimen118 +\Choice@toks=\toks22 +\Field@toks=\toks23 +Package hyperref Info: Hyper figures OFF on input line 3102. +Package hyperref Info: Link nesting OFF on input line 3107. +Package hyperref Info: Hyper index ON on input line 3110. +Package hyperref Info: backreferencing OFF on input line 3117. +Package hyperref Info: Link coloring OFF on input line 3122. +\Hy@abspage=\count107 +\c@Item=\count108 +\c@Hfootnote=\count109 +) +*hyperref using default driver hpdftex* +(/usr/share/texmf/tex/latex/hyperref/hpdftex.def +File: hpdftex.def 2007/02/07 v6.75r Hyperref driver for pdfTeX +\Fld@listcount=\count110 +) (./hybrid_current.aux) +\openout1 = `hybrid_current.aux'. + +LaTeX Font Info: Checking defaults for OML/cmm/m/it on input line 26. +LaTeX Font Info: ... okay on input line 26. +LaTeX Font Info: Checking defaults for T1/cmr/m/n on input line 26. +LaTeX Font Info: ... okay on input line 26. +LaTeX Font Info: Checking defaults for OT1/cmr/m/n on input line 26. +LaTeX Font Info: ... okay on input line 26. +LaTeX Font Info: Checking defaults for OMS/cmsy/m/n on input line 26. +LaTeX Font Info: ... okay on input line 26. +LaTeX Font Info: Checking defaults for OMX/cmex/m/n on input line 26. +LaTeX Font Info: ... okay on input line 26. +LaTeX Font Info: Checking defaults for U/cmr/m/n on input line 26. +LaTeX Font Info: ... okay on input line 26. +LaTeX Font Info: Checking defaults for PD1/pdf/m/n on input line 26. +LaTeX Font Info: ... okay on input line 26. +Package hyperref Info: Link coloring OFF on input line 26. 
+ +(/usr/share/texmf/tex/latex/hyperref/nameref.sty +Package: nameref 2006/12/27 v2.28 Cross-referencing by name of section + +(/usr/share/texmf/tex/latex/oberdiek/refcount.sty +Package: refcount 2006/02/20 v3.0 Data extraction from references (HO) +) +\c@section@level=\count111 +) +LaTeX Info: Redefining \ref on input line 26. +LaTeX Info: Redefining \pageref on input line 26. + (./hybrid_current.out) +(./hybrid_current.out) +\@outlinefile=\write3 +\openout3 = `hybrid_current.out'. + + +! Missing $ inserted. + + $ +l.73 that is, \forall + \, $v_x$\in\, $H:$\, \exists\, $C_x = \{c_0, c_1, ...,... + +? +! Missing $ inserted. + + $ +l.73 that is, \forall\, $v_ + x$\in\, $H:$\, \exists\, $C_x = \{c_0, c_1, ...,... + +? +! Missing $ inserted. + + $ +l.73 that is, \forall\, $v_x$\in + \, $H:$\, \exists\, $C_x = \{c_0, c_1, ...,... + +? +! Missing $ inserted. + + $ +l.73 ... \forall\, $v_x$\in\, $H:$\, \exists\, $C_ + x = \{c_0, c_1, ..., c_{n... + +? +[1 + +{/usr/share/texmf/fonts/map/pdftex/updmap/pdftex.map}] +! Missing $ inserted. + + $ +l.132 ...}^{numProc-1} ($number of local vertices_ + i$)$. +? +! Missing $ inserted. + + $ +l.133 + +? +[2] [3] <128_time.pdf, id=61, 794.97pt x 614.295pt> +File: 128_time.pdf Graphic file (type pdf) + +<128_cutl.pdf, id=62, 794.97pt x 614.295pt> +File: 128_cutl.pdf Graphic file (type pdf) + [4 <./128_time.pdf + +pdfTeX warning: pdflatex (file ./128_time.pdf): PDF inclusion: Page Group detec +ted which pdfTeX can't handle. Ignoring it. +>] <128_breakdown_percent.pdf, id=76, 794.97pt x 614.295pt> +File: 128_breakdown_percent.pdf Graphic file (type pdf) + + <2_time.pdf, id=77, 794.97pt x 614.295pt> +File: 2_time.pdf Graphic file (type pdf) + + <2_cutl.pdf, id=78, 794.97pt x 614.295pt> +File: 2_cutl.pdf Graphic file (type pdf) + +<2_breakdown_percent.pdf, id=79, 794.97pt x 614.295pt> +File: 2_breakdown_percent.pdf Graphic file (type pdf) + + [5] [6 <./128_cutl.pdf + +pdfTeX warning: pdflatex (file ./128_cutl.pdf): PDF inclusion: Page Group detec +ted which pdfTeX can't handle. Ignoring it. +> <./128_breakdown_percent.pdf + +pdfTeX warning: pdflatex (file ./128_breakdown_percent.pdf): PDF inclusion: Pag +e Group detected which pdfTeX can't handle. Ignoring it. +>] [7 <./2_time.pdf + +pdfTeX warning: pdflatex (file ./2_time.pdf): PDF inclusion: Page Group detecte +d which pdfTeX can't handle. Ignoring it. +> <./2_cutl.pdf + +pdfTeX warning: pdflatex (file ./2_cutl.pdf): PDF inclusion: Page Group detecte +d which pdfTeX can't handle. Ignoring it. +>] [8] [9 <./2_breakdown_percent.pdf + +pdfTeX warning: pdflatex (file ./2_breakdown_percent.pdf): PDF inclusion: Page +Group detected which pdfTeX can't handle. Ignoring it. +>] (./hybrid_current.aux) ) +Here is how much of TeX's memory you used: + 3336 strings out of 256216 + 44724 string characters out of 1917073 + 104735 words of memory out of 1500000 + 6577 multiletter control sequences out of 10000+200000 + 8770 words of font info for 32 fonts, out of 1200000 for 2000 + 645 hyphenation exceptions out of 8191 + 27i,9n,36p,252b,420s stack positions out of 5000i,500n,6000p,200000b,15000s + + +Output written on hybrid_current.pdf (9 pages, 186635 bytes). +PDF statistics: + 186 PDF objects out of 1000 (max. 8388607) + 27 named destinations out of 1000 (max. 131072) + 103 words of extra memory for PDF output out of 10000 (max. 
10000000) + diff --git a/Zoltan-3.90/doc/NEA_docs/writeup/hybrid_current.out b/Zoltan-3.90/doc/NEA_docs/writeup/hybrid_current.out new file mode 100644 index 00000000..c8a1a16d --- /dev/null +++ b/Zoltan-3.90/doc/NEA_docs/writeup/hybrid_current.out @@ -0,0 +1,7 @@ +\BOOKMARK [1][-]{section.1}{Introduction}{} +\BOOKMARK [1][-]{section.2}{Parallel hypergraphs and geometric input}{} +\BOOKMARK [1][-]{section.3}{PHG, MPI and 2-dimensional representation}{} +\BOOKMARK [1][-]{section.4}{Matching}{} +\BOOKMARK [1][-]{section.5}{Reduction factor}{} +\BOOKMARK [1][-]{section.6}{Results}{} +\BOOKMARK [1][-]{section.7}{Conclusion and discussion}{} diff --git a/Zoltan-3.90/doc/NEA_docs/writeup/hybrid_current.pdf b/Zoltan-3.90/doc/NEA_docs/writeup/hybrid_current.pdf new file mode 100644 index 00000000..133e9c58 Binary files /dev/null and b/Zoltan-3.90/doc/NEA_docs/writeup/hybrid_current.pdf differ diff --git a/Zoltan-3.90/doc/NEA_docs/writeup/hybrid_current.tex b/Zoltan-3.90/doc/NEA_docs/writeup/hybrid_current.tex new file mode 100644 index 00000000..ba560892 --- /dev/null +++ b/Zoltan-3.90/doc/NEA_docs/writeup/hybrid_current.tex @@ -0,0 +1,296 @@ +\documentclass[12pt]{article} + +\usepackage{amsmath} % need for subequations +\usepackage{graphicx} % need for figures +\usepackage{verbatim} % useful for program listings +\usepackage{color} % use if color is used in text +\usepackage{subfigure} % use for side-by-side figures +\usepackage{hyperref} % use for hypertext links, including those to external documents and URLs + +\setlength{\baselineskip}{16.0pt} % 16 pt usual spacing between lines +\setlength{\parskip}{3pt plus 2pt} +\setlength{\parindent}{20pt} +\setlength{\oddsidemargin}{0.5cm} +\setlength{\evensidemargin}{0.5cm} +\setlength{\marginparsep}{0.75cm} +\setlength{\marginparwidth}{2.5cm} +\setlength{\marginparpush}{1.0cm} +\setlength{\textwidth}{150mm} + +\begin{comment} +\pagestyle{empty} +\end{comment} + + + +\begin{document} + +\begin{center} +{\large Hybrid Partitioning in Zoltan} \\ +Nick Aase, Karen Devine \\ +Summer, 2011 +\end{center} + + +\section{Introduction} +When used for partitioning, Zoltan has a wide range of algorithms +available to it. Traditionally they have fallen into two categories: +geometric-based partitioning, and topology-based partitioning. Each +method has its own strengths and weaknesses which ultimately come down +to the tradeoff between speed and quality, and the onus is placed +upon the user to determine which is more desirable for the project +at hand. + +In our project we strived to develop a hybrid partitioning algorithm; +one that attempts to take advantage of the efficiency of geometric +methods, as well as the precision of topological ones. The reasoning +behind this concept is that problem sets with large amounts of data may +be more easily digestible by topological methods if they are first +reduced into managable pieces based on their geometry. + +The two subjects chosen for this project were the Recursive +Coordinate Bisection (RCB) algorithm and Parallel Hypergraph +partitioning (PHG). RCB is an extremely fast method of partitioning, +but it can be clumsy at times when it ``cuts'' across a coordinate plane. +On the other hand, PHG has a good understanding of the relationships +between data, making its partitioning quite accurate, but it suffers +from having to spend a great deal of time finding those relationships. 
+
+For further information on implementing hybrid partitioning, please see
+the developer's guide at
+http://www.cs.sandia.gov/Zoltan/dev\_html/dev\_hybrid.html
+
+
+\section{Parallel hypergraphs and geometric input}
+In order for Zoltan to support hybrid partitioning, it is necessary
+to properly and frequently obtain, preserve, and communicate coordinate
+data. The first step that needed to be taken was to modify PHG to
+support coordinate information. Hypergraph objects carry a substantial
+amount of data already, but we had to add an array of floating point
+values to store the coordinates. Currently, when a hypergraph is built and
+geometric information is available from the input, each vertex will have
+a corresponding subset within the array defining its coordinates;
+that is, $\forall\, v_x \in H \;\exists\, C_x = \{c_0, c_1, \ldots, c_{n-1}\}$,
+where $v_x$ is an arbitrary vertex in the hypergraph $H$, $C_x$ is its
+corresponding coordinate subset, and $n$ is the number of dimensions in
+the system. In this way, Zoltan can treat each coordinate subset as an
+element of that vertex.
+
+
+\section{PHG, MPI and 2-dimensional representation}
+PHG is interesting in that multiple processors can share partial data
+that describes the properties of hyperedges and vertices. This sort of
+system can be represented in a 2-dimensional distribution similar to
+Table 1. A populated field indicates that the processor on the y-axis has
+data related to the vertex on the x-axis. In this example, you can see
+that processors $P_0$ and $P_2$ share data describing vertices $v_0$ and
+$v_2$.
+
+\begin{table}[h]
+\begin{center}
+\begin{tabular}{|r|l|l|l|}
+ \hline
+ Processor & $v_0$ & $v_1$ & $v_2$ \\
+ \hline
+ $P_0$ & x & & x \\
+ \hline
+ $P_1$ & & x & \\
+ \hline
+ $P_2$ & x & & x \\
+ \hline
+\end{tabular}
+\caption{\label{tab:0/tc} Before communication}
+\end{center}
+\end{table}
+
+Using Message Passing Interface (MPI) communicators, it is possible to
+communicate with processors by column. We use an \texttt{MPI\_Allreduce}
+call to collect data from each processor and gather it into a usable
+form. Consider Table 2.
+
+\begin{table}[h]
+\begin{center}
+\begin{tabular}{|r|l|l|l|}
+ \hline
+ Processor & $v_0$ & $v_1$ & $v_2$ \\
+ \hline
+ $P_0$ & x & & \\
+ \hline
+ $P_1$ & & x & \\
+ \hline
+ $P_2$ & & & x \\
+ \hline
+\end{tabular}
+\caption{\label{tab:1/tc} After communication}
+\end{center}
+\end{table}
+
+This same sort of operation is performed with weight data, so implementing
+it on coordinate data was simply another step in setting up PHG to support
+coordinate information from the input. Afterwards, the entirety of a vertex's
+data will be unique to a single processor, with the number of global
+vertices equal to $\sum_{i=0}^{numProc-1} (\text{number of local vertices}_i)$.
+
+
+\section{Matching}
+There are several matching methods already native to Zoltan and specific to
+PHG, but we needed to create a new method in order to use RCB on the
+hypergraph data. Before the actual matching occurs, several specialized
+callbacks and parameters are registered. Doing this is crucial if RCB and PHG
+are to interface properly with each other.
+
+The next task is to physically call RCB. It was easy enough to send PHG
+data to RCB, as we simply used the \texttt{Zoltan\_LB\_Partition} wrapper,
+not unlike other standard load balancing partitioners. However, getting
+matchings \emph{back} from RCB to PHG was another matter entirely. Thanks to
+Dr.
Devine's work, we were able to effectively commandeer one of RCB's unused
+return values: since all matching algorithms conform syntactically to the
+aforementioned load-balancing wrapper, there are some arguments and/or
+values that are never used, depending on what data a given partitioner needs.
+In the case of RCB, the return value \texttt{*export\_global\_ids}, which is
+defined in its prototype, was never actually computed. Dr. Devine was able
+to rewire RCB so that, when using hybrid partitioning, it would return the
+IDs of the matchings we need for each hypergraph (which are referred to in
+the matching procedure as \emph{candidates}).
+
+This new matching procedure is similar to PHG's agglomerative matching,
+whereby candidate vertices are selected to represent groups of similar
+vertices. These candidates then make up the standard vertices in the
+resultant coarse hypergraph. The major difference is that standard
+agglomerative matching determines its candidates by the connectivity of
+vertices to one another; the more heavily connected a subset of vertices
+is, the more likely they will share the same candidate. Using RCB means
+making the assumption that related vertices will be geometrically similar:
+recursive geometric cuts will be more likely to naturally bisect less
+connected parts of the hypergraph, and the vertices that are members of
+the resulting subdomains will share the same candidates. Given RCB's
+track record, this method should be significantly faster than
+agglomerative matching.
+
+
+\section{Reduction factor}
+When using hybrid partitioning, the user passes a parameter in the input
+file called \texttt{HYBRID\_REDUCTION\_FACTOR}, which is a number $> 0$
+and $\leq 1$ that gets passed into RCB. This parameter defines the
+aggressiveness of the overall procedure: it determines the amount by which
+the larger hypergraph will be reduced (e.g., for an
+original, fine hypergraph, $H_f$, with number of vertices
+$|V_f| = 1000$ and a reduction factor of $f = 0.1$, the coarse hypergraph,
+$H_c$, will have $|V_c| = 100$ vertices).
+
+This gives the user more control over the balance between quality
+and efficiency.
+
+
+\section{Results}
+We ran experiments primarily with 2 and 128 processors on the Odin cluster
+at Sandia National Labs, though there were brief, undocumented forays with
+16 and 32 processors as well. Odin has two AMD Opteron 2.2GHz processors
+and 4GB of RAM on each node, and the nodes are connected with a Myrinet
+network \cite{Catalyurek}. The partitioning methods used were RCB, PHG, and
+hybrid partitioning with reduction factors of 0.01, 0.05, and 0.1. Each run
+went through 10 iterations of the scenario. The runs with 128 processors were
+given 5 different meshes to run on, whereas the 2-processor runs only ran
+on the 4 smaller meshes, as the cluster was undergoing diagnostics at the
+time of the experiments.
+
+%NEED TIMES @ 128 PROCS
+\begin{figure}[htbp]
+ \centering
+ \includegraphics[width=\textwidth, height=80mm]{128_time.pdf}
+ \caption{Runtimes on 128 processors}\label{fig:Times_np_128}
+\end{figure}
+
+
+
+%NEED cutl @ 128 PROCS
+\begin{figure}[htbp]
+ \centering
+ \includegraphics[width=\textwidth, height=70mm]{128_cutl.pdf}
+ \caption{Cuts on 128 processors}\label{fig:Cuts_np_128}
+\end{figure}
+
+You can see from Figures 1 and 2 that at 128 processors the hybrid methods
+are mainly slower than PHG and less accurate than RCB: both results are
+the inverse of what we had hoped.
There was better news looking at where
+the processes were taking their time, though:
+
+%timer breakdowns for 128
+\begin{figure}[htbp]
+ \centering
+ \includegraphics[width=\textwidth, height=70mm]{128_breakdown_percent.pdf}
+ \caption{Timing by percentage on 128 processors (UL, Shockstem 3D; UR,
+ Shockstem 3D -- 108; LL, RPI; LR, Slac1.5)}\label{fig:Percent_np_128}
+\end{figure}
+
+The dramatic decrease in the matching time meant that RCB was, indeed,
+helping on that front.
+
+When we ran our simulations in serial, however, we saw some very different
+results:
+
+%times, cutl
+\begin{figure}[htbp]
+ \centering
+ \includegraphics[width=\textwidth, height=80mm]{2_time.pdf}
+ \caption{Runtimes in serial on 2 processors}\label{fig:Times_np_2}
+\end{figure}
+
+
+
+%cutl @ 2 PROCS
+\begin{figure}[htbp]
+ \centering
+ \includegraphics[width=\textwidth, height=70mm]{2_cutl.pdf}
+ \caption{Cuts in serial on 2 processors}\label{fig:Cuts_np_2}
+\end{figure}
+
+In general the hybrid times beat the PHG times, and the hybrid cuts beat
+the RCB cuts.
+
+%time breakdowns for 2
+\begin{figure}[htbp]
+ \centering
+ \includegraphics[width=\textwidth, height=70mm]{2_breakdown_percent.pdf}
+ \caption{Timing by percentage on 2 processors (UL, Shockstem 3D; UR,
+ Shockstem 3D -- 108; LL, RPI; LR, Slac1.5)}\label{fig:Percent_np_2}
+\end{figure}
+
+Looking at individual timers in this serial run, we can see that RCB has
+still drastically reduced the matching time. In addition, the slowdown in
+the coarse partitioning has been greatly reduced.
+
+\section{Conclusion and discussion}
+The parallel implementation of hybrid partitioning is obviously not yet
+functioning as desired, but we believe that there is ultimately a great
+deal of promise in this method. The results from our serial runs
+are encouraging, and it would be worth the effort to continue forward.
+
+Perhaps it would be helpful to check for any communication issues arising
+between processors; the whole system could potentially drag if a
+single processor were waiting for a message. Additionally, Dr. Catalyurek had
+suggested using RCB-based coarsening only on the largest, most complex
+hypergraphs, and then reverting to standard agglomerative matching for
+coarser iterations.
+
+At this moment, there could be four different ways to use Dr. Catalyurek's
+method. The first, and perhaps simplest, would be to hardwire
+in the number of coarsening levels to give to RCB. A second way would be
+to define a new parameter to allow the user to select the number of
+RCB-based coarsenings. A third would be to write a short algorithm to
+determine and use the optimal number of levels based on the input.
+Finally, the choice could be left to user input, with one of the other
+approaches as the default.
+
+\begin{thebibliography}{5}
+
+\bibitem{Catalyurek}U.V. Catalyurek, E.G. Boman, K.D. Devine, D. Bozdag,
+  R.T. Heaphy, and L.A. Riesen. \emph{A Repartitioning Hypergraph Model
+  for Dynamic Load Balancing.} Sandia National Labs, 2009.
+ +\end{thebibliography} + +{\small \noindent August 2011.} +\end{document} + + diff --git a/Zoltan-3.90/doc/NEA_docs/writeup/odin-data.ods b/Zoltan-3.90/doc/NEA_docs/writeup/odin-data.ods new file mode 100644 index 00000000..3d2b0e96 Binary files /dev/null and b/Zoltan-3.90/doc/NEA_docs/writeup/odin-data.ods differ diff --git a/Zoltan-3.90/doc/Tutorial/zoltan_tutorial_document.pdf b/Zoltan-3.90/doc/Tutorial/zoltan_tutorial_document.pdf new file mode 100644 index 00000000..13dd4183 Binary files /dev/null and b/Zoltan-3.90/doc/Tutorial/zoltan_tutorial_document.pdf differ diff --git a/Zoltan-3.90/doc/Tutorial/zoltan_tutorial_presentation.pdf b/Zoltan-3.90/doc/Tutorial/zoltan_tutorial_presentation.pdf new file mode 100644 index 00000000..665a9187 Binary files /dev/null and b/Zoltan-3.90/doc/Tutorial/zoltan_tutorial_presentation.pdf differ diff --git a/Zoltan-3.90/doc/Zoltan_html/Zoltan.html b/Zoltan-3.90/doc/Zoltan_html/Zoltan.html new file mode 100644 index 00000000..256a813c --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/Zoltan.html @@ -0,0 +1,1041 @@ + + + + + + + + + + + + + + + Zoltan + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+[Sandia National Laboratories] +

[navigation panel]

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
Zoltan +Home Page
+
+ + + + +
Zoltan +User's Guide
+
+ + + + +
+Frequently Asked Questions
+
+ + + + +
Zoltan +Project Description
+
+ + + + +
Papers +and Presentations
+
+ + + + +
How to Cite Zoltan
+
+ + + + +
Download +Zoltan
+
+ + + + +
Report a Zoltan Bug
+
+ + + + +
+Contact Zoltan Developers
+
+ + + + +
+Sandia Privacy and Security Notice
+
+
Zoltan:  +
Parallel Partitioning, Load Balancing and Data-Management Services + +

+ + + + +


+

+

The Zoltan Team

+ + + + + +
+ Sandia National Laboratories
+Erik Boman
+Karen Devine
+Vitus Leung
+Sivasankaran Rajamanickam
+Michael Wolf
+
+ Georgia Tech
+Umit Catalyurek
+
+
+

Past Zoltan Contributors

+ + + + + + + + + + +
+ Sandia National Laboratories:
+Cédric Chevalier (currently at CEA, DAM)
+Robert Heaphy
+Bruce Hendrickson
+Matthew St. John
+Lee Ann Riesen
+Courtenay Vaughan
+
+
+ Siena College
+James Teresco
+
+ National Institute of Standards and Technology
+William F. Mitchell
+
+ Rensselaer Polytechnic Institute
+Jamal Faik
+Luis Gervasio
+
+ Google (formerly Ohio State University)
+Doruk Bozdag
+
+

+

+ + +


+ Tutorials:
+
    +
  • +"Getting Started with Zoltan: A Short Tutorial."
    +A ten-page introduction to using Zoltan is now available. Download it here:
    +PDF +        +Citation (BIBTEX) +        +Viewgraphs from the tutorial (PDF) +

    +
  • +
  • +The Zoltan Tutorial presented at the 2010 ACTS Workshop is available here. + +
  • +
  • +The Zoltan Tutorial presented at SciDAC2007 is now on-line. Download it here: + +
  • +User's Guide (HTML) +
  • +
+ +
+ Zoltan News:
+
    +
  • +Zoltan v3.83 +was released in January 2016 as part of Trilinos v12.6. +This release includes only minor software changes. +See more details below. +

    +
  • +Zoltan v3.82 +was released in May 2015 as part of Trilinos v12. +This release includes only minor software changes. +See more details below. +

    +
  • +Zoltan v3.81 +was released in November 2014 as part of Trilinos v11.12. +This release upgrades Zoltan's interface to ParMETIS v4 and Scotch v6. +See more details below. +

    +
  • +Zoltan v3.8 +was released in October 2013 as part of Trilinos v10.11. +This release uses Trilinos' BSD license! See more details below. +

    +
  • +
  • +Zoltan v3.6 +was released in September 2011 as part of Trilinos v10.8. +This release includes new recoloring capability in Zoltan's coloring algorithms. +Zoltan's hierarchical partitioning algorithm was +also updated for greater efficiency. +We have updated our third-party library support to include +PT-Scotch versions up to 5.1.12 and ParMETIS v3.1 and v4.0. +

    +
  • +
  • +Zoltan v3.5 was released in March 2011. This release +differs from the Trilinos 10.6 release by including the following features: +
      +
    • +Fix in Fortran90 interface that causes compilation and run-time problems +with gcc 4.5 and later when compiler optimization is enabled. +
    • +
    • +Support for 64-bit builds of Zoltan, enabling operation on more than 2B objects. +See details for building in the Zoltan User's Guide. +
    • +
    • +Faster graph builds for very specific input types. See parameter +GRAPH_BUILD_TYPE. +
    • +
    +

    +
  • +
  • +The Zoltan team received the "Best Algorithms Paper Award" at the +2007 International Parallel and Distributed +Processing Symposium (IPDPS07) +for their paper +"Hypergraph-based Dynamic Load Balancing for Adaptive Scientific Computations." +
    +Abstract (HTML) +     +Paper (PDF) +     +Citation (BIBTEX) +

    +

  • +
  • +Zoltan was part of several funded DOE SciDAC projects: +
      +
    • +SciDAC2 CSCAPES: Combinatorial Scientific Computing +and Petascale Simulations
      +Contact: Erik Boman +
    • +
    • +SciDAC2 ITAPS: Interoperable +Technologies for Advanced Petascale Simulations
      +Contact: Karen Devine
      +
    • +
    • +SciDAC3 to SciDAC5: FASTMath: Frameworks, algorithms and scalable technologies +for mathematics +
      +Contact: Karen Devine
      +
    • +
    +
  • +
+

+ + +


+Now available: Zoltan 3.83
+

+ +New Features in Zoltan v3.83, released January 2016. +Release Notes +      |       +Backward Compatibility +

    +
  • Autotools files updated to avoid deprecated perl features
  • +
  • Added Zoltan_Get_Fn interface to return pointers to registered callback +functions.
  • +
  • Several bug fixes in Fortran90 interface
  • +
  • Subtle rounding error fixed in PHG hypergraph partitioner
  • +
  • Minor changes to distributed data directory to track number of nodes +per processor; include file DD.h is now named zoltan_dd_const.h
  • +
+ +

+ +New Features in Zoltan v3.82, released May 2015. +Release Notes +      |       +Backward Compatibility +

    +
  • Minor code changes to remove compiler warnings and handle error conditions
  • +
  • Enable "make -j" in autotools build of Zoltan's F90 interface.
  • +
+

+ +New Features in Zoltan v3.81, released November 2014: +Release Notes +      |       +Backward Compatibility +

    +
  • Better integration of ParMETIS v4, METIS v5, and Scotch v6.
  • +
  • Better handling of interface differences between MPI versions 1 and 2.
  • +
  • Bug fix in siMPI for non-MPI builds.
  • +
  • Minor bugfixes.
  • +
+

+ +New Features in Zoltan v3.8, released October 2013: +Release Notes +      |       +Backward Compatibility +

    +
  • Zoltan is now released under Trilinos' BSD license.
  • +
  • Added an improved hash function that speeds lookups, +particularly for apps using partitioning parameter RETURN_LISTS=PARTS.
  • +
  • Added fixes for integer overflows that occur when A and B are valid +integers, but A*B overflows an integer. +
  • Removed support for OCT partitioning; use HSFC partitioning for similar decompositions.
  • +
  • Removed support for third-party library DRUM.
  • +
  • Removed support for the Zoltan v1.x interface.
  • +
+ +

+ +New Features in Zoltan v3.6, released September 2011: +Release Notes +      |       +Backward Compatibility +

    +
  • +Recoloring capability in Zoltan's coloring algorithms, obtaining colorings +with fewer colors. +
  • +
  • +Support for graph partitioning and ordering with PT-Scotch up to version 5.1.12. +
  • +
  • +Support for graph partitioning and ordering with ParMETIS v3.1 and v4.0. +
  • +
  • +Improved hierarchical partitioning performance. +
  • +
  • +Deprecation of OCT methods and interface defined in lbi_const.h. See the +backward compatibility notes for more +info. +
  • +
+ +

+ +New Features in Zoltan v3.5, released March 2011: +Release Notes +      |       +Backward Compatibility +

    +
  • +Fix in Fortran90 interface that causes compilation and run-time problems +with gcc 4.5 and later when compiler optimization is enabled. +
  • +
  • +Support for 64-bit builds of Zoltan, enabling operation on more than 2B objects. +See details for building in the Zoltan User's Guide. +
  • +
  • +Faster graph builds for very specific input types. See parameter +GRAPH_BUILD_TYPE. +
  • +
+ +

+ +New Features in Zoltan v3.3, released July 2010: + +Release Notes +      |       +Backward Compatibility + +

    +
  • +New local ordering method based on +space-filling curves to improve +memory and cache locality within a processor. +
  • +
  • Ability to call graph partitioning algorithms using hypergraph callback
functions; this capability is useful for applications with, say, block-structured
matrix distributions (e.g., SuperLU), where all information about a matrix
row or column is not available on a single processor.
  • +
  • +Improved execution time of parallel hypergraph partitioning. +
  • +
+ + + + +

+New Features in Zoltan v3.2, released September 2009: +Release Notes +      |       +Backward Compatibility + +

    +
  • +New interface +to +Scotch and PT-Scotch parallel graph partitioning +algorithms. +
  • +
  • +Simplified interface to graph ordering +and coloring algorithms +
  • +
  • +Automated symmetrization of graphs for graph partitioning, coloring +and ordering. +(See parameters GRAPH_SYMMETRIZE and GRAPH_SYM_WEIGHT in the +Scotch and +ParMETIS graph packages.) +
  • +
  • +Improved function +Zoltan_LB_Eval + returns more information about a decomposition to users. +
  • +
  • +Improved examples showing Zoltan usage in C and C++ +are included in zoltan/example. +
  • +
  • +Improved support for builds under autotools, +including builds of Zoltan's F90 interface. +
  • +
  • +New support for CMake builds +and testing through Trilinos; builds of +Zoltan's F90 interface are included. +
  • +
  • +Improved integration into +Isorropia +partitioners for Trilinos' Epetra classes. +
  • +
+ + + + + +

+New Features in Zoltan v3.1, released September 2008: +

+ + + + +New Features in Zoltan 3.0, released May 2007: + +

+New Features in Zoltan 2: +

+ +See the +release notes for descriptions +of new functionality and more. + + + + + +
The Zoltan Library provides critical data-management services to a wide range
of parallel applications.  Zoltan includes many utilities needed by
unstructured and/or adaptive parallel applications, including dynamic load
balancing and partitioning, data migration, unstructured communication,
distributed data directories, and memory-management tools.
Zoltan's object-oriented interface is easy to use and enables Zoltan to be
used by a number of different applications.  Zoltan is designed to be
flexible and extensible, so different algorithms can be used, compared and
added easily.

Why Zoltan is needed: +

    +
  • +In some applications, work loads and/or geometric locality change as computations +proceed;
    Zoltan provides dynamic redistribution of data to maintain high +performance. 
  • + +
      +
    • +Adaptive finite element methods
    • + +
    • +Particle methods
    • + +
    • +Contact detection algorithms
    • + +
    • +Multiphysics simulations
    • + +
    • +Adaptive physics models
    • +
    + +
  • +Processors need to track off-processor data's locations in dynamic +environments; Zoltan's distributed data directories allow applications to +efficiently query and update off-processor data locations. +
  • + +
  • +Unstructured and adaptive applications require complicated interprocessor +communication; Zoltan's unstructured communication package +manages sends and receives for application developers. +
  • + +
  • +State-of-the-art parallel computers often do not have sophisticated +debugging +tools available; Zoltan's dynamic memory management package simplifies +location of memory leaks and other memory errors. +
  • + +
+ +Advantages of using Zoltan in applications: + + +Advantages of using Zoltan for developing new algorithms: + + +Zoltan philosophy and project description: + + +Related links: + +
+Copyright (c) 2000-2013, Sandia National Laboratories.
+See the README file in the main Zoltan directory for information on Zoltan licensing. +
+ +
+ +
+ + + + + + + + + + + + + + + diff --git a/Zoltan-3.90/doc/Zoltan_html/Zoltan_FAQ.html b/Zoltan-3.90/doc/Zoltan_html/Zoltan_FAQ.html new file mode 100644 index 00000000..edbdb145 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/Zoltan_FAQ.html @@ -0,0 +1,527 @@ + + + + + + + + + + + + + + + Zoltan + + + + + + + + + + + + + + + + + + + + + + + + +
Zoltan:  +
Parallel Partitioning, Load Balancing and Data-Management Services +

+Frequently Asked Questions + +

+ + + + +


+

+

    + + + + + + +
  1. How do I upgrade from the Zoltan v1 interface (in +lbi_const.h) to the current Zoltan interface (in zoltan.h)? +
  2. Zoltan's hypergraph partitioner +is returning empty parts, that is, parts that have zero +objects in them. Is this a bug? +
  3. On some platforms, why do Zoltan partitioning +methods RCB and RIB use an increasing amount of memory over multiple +invocations? +
  4. Why does compilation of the Fortran interface hang +with Intel's F90 compiler? +
  5. During runs (particularly on RedStorm), MPI +reports that it is out of resources or too many messages have been posted. +What does this mean and what can I do? +
  6. On very large problems, +Zoltan communication routines fail in MPI_Alltoallv. +Why does this happen and what can I do? +
  7. Realloc fails when there is plenty of memory. Is this a Zoltan bug? +
  8. What does the following message mean during compilation of Zoltan:
Makefile:28: mem.d: No such file or directory
+

+


+
+

+

    + + + + + + +
    +
  1. How do I upgrade from the Zoltan v1 interface (in +lbi_const.h) to the current Zoltan interface (in zoltan.h)? +

    +The Zoltan interface was revised in version 1.3 to include "Zoltan" in +function names and defined types. Upgrading to this interface is easy. +

      +
    • Include "zoltan.h" instead of "lbi_const.h" in your source files. +
    • For most Zoltan functions and constants, prefix "LB_" is replaced +by "Zoltan_"; for example, "LB_Set_Param" is now "Zoltan_Set_Param." +A few exceptions exist; for example, +"LB_Balance" is Zoltan_LB_Balance; "LB_Free_Data" is "Zoltan_LB_Free_Data." +See the Release v1.3 +backward compatibility notes for a complete list of name changes. +
    • Fortran90 applications should define user-defined data +in zoltan_user_data.f90 rather than lb_user_const.f90. +
    +More complete details are in the +Release v1.3 +backward compatibility notes. +
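As a minimal sketch of the conversion (the parameter values below are only examples, not a complete program), the changes usually amount to swapping the include file and the function prefixes:

/* v1 interface                         current interface                  */
/*   #include "lbi_const.h"       -->   #include "zoltan.h"                */
/*   LB_Set_Param(lb, ...)        -->   Zoltan_Set_Param(zz, ...)          */
/*   LB_Balance(lb, ...)          -->   Zoltan_LB_Balance(zz, ...)         */

#include "zoltan.h"

void set_example_params(struct Zoltan_Struct *zz)
{
  Zoltan_Set_Param(zz, "LB_METHOD", "RCB");        /* formerly LB_Set_Param */
  Zoltan_Set_Param(zz, "IMBALANCE_TOL", "1.05");
}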
    +
  2. Zoltan's hypergraph partitioner +is returning empty parts, that is, parts that have zero +objects in them. Is this a bug? +

    +The hypergraph partitioner creates partitions with up to a specified amount +of load imbalance; the default value is 10% imbalance allowed, but the user +can tighten the load imbalance. Any partition that satisfies the load +imbalance tolerance is a valid partition. As a secondary goal, the +hypergraph partitioner attempts to minimize interprocessor communication. +Having a part with zero weight almost certainly reduces total communication; +the zero-weight part would not need to communicate with any other part. +

    +So in some cases, Zoltan is generating a valid partition -- one that +satisfies the imbalance tolerance -- that happens to have lower total +communication if one of the parts is empty. This is a good thing, but one +that some applications don't like because they didn't consider having zero +weight on a processor. +

    +To try to avoid this problem, lower the imbalance tolerance so that +the partitioner is more likely to give work to all parts. Change the value +of Zoltan parameter +IMBALANCE_TOL +to a smaller value; e.g., 1.03 to allow only 3% imbalance:
    +Zoltan_Set_Param(zz, "IMBALANCE_TOL", "1.03"); +

    As an alternative, you may try one of Zoltan's geometric methods, such as
RCB, RIB, or HSFC, which do not have this property.

    +We may in the future add a parameter to disallow zero-weight parts, but at +present, we do not have that option. + +


    +
  3. On some platforms, why do Zoltan partitioning +methods RCB and RIB use an increasing amount of memory over multiple +invocations? +

    +Zoltan partitioning methods RCB and RIB use MPI_Comm_dup and MPI_Comm_split +to recursively create communicators with subsets of processors. +Some implementations of +MPI (e.g., the default MPI on Sandia's Thunderbird cluster) do not correctly +release memory associated with these communicators during MPI_Comm_free, +resulting in growing memory use over multiple invocations of RCB or RIB. +An undocumented workaround in +Zoltan is to set the TFLOPS_SPECIAL parameter to 1 (e.g., +Zoltan_Set_Param(zz,"TFLOPS_SPECIAL","1");), which causes an +implementation that doesn't use MPI_Comm_split to be invoked. + +


    +
  4. Why does compilation of the Fortran interface hang +with Intel's F90 compiler? +

    +There is a bug in some versions of Intel's F90 compiler. We know +Zoltan's Fortran interface compiles with Intel's F90 compiler versions +10.1.015 through 11.1.056. We know that it does not compile with +versions 11.1.059, 11.1.069 and 11.1.072. We reported the problem to +Intel, and we are told that the compiler bug is fixed in version 11.1 update 7, +which is scheduled for release in August 2010. See this +Intel +Forum link for more details. + +


    +
  5. During runs (particularly on RedStorm), MPI +reports that it is out of resources or too many messages have been posted. +What does this mean and what can I do? +

    +Some implementations of MPI (including RedStorm's implementation) limit +the number of message receives that can be posted simultaneously. Some +communications in Zoltan (including hashing of IDs to processors in the +Zoltan Distributed Data Directory) can require messages from large numbers +of processors, triggering this error on certain platforms. +

    +To avoid this problem, Zoltan contains logic to use AllToAll communication +instead of point-to-point communication when a large number +of receives are needed. The maximum number of simultaneous receives allowed +can be set as a compile-time option to Zoltan. +In the Autotool build +environment, option --enable-mpi-recv-limit=# sets the +maximum number of simultaneous receives allowed. The default value is 4. + +


    +
  6. On very large problems, +Zoltan communication routines fail in MPI_Alltoallv. +Why does this happen and what can I do? +

    +For very large problems, the values in the displacement arrays needed +by MPI_Alltoallv can exceed INT_MAX (the largest integer that can be stored +in 32 bits). The solution to this problem is to make Zoltan avoid using +MPI_Alltoallv and, instead, use point-to-point sends and receives. The +compile-time option +in the Autotool build +environment is --enable-mpi-recv-limit=0. + + +


    +
  7. Realloc fails when there is plenty of memory. Is this a Zoltan bug? +

    +This problem has been noted on different Linux clusters running parallel +applications using different MPI libraries and C++ libraries. +Realloc fails where a malloc call will succeed. The source of the error has +not been identified, but it is not a Zoltan bug. The +solution is to compile Zoltan with the flag -DREALLOC_BUG. +Zoltan will replace +every realloc call with malloc followed by a memcpy and a free. + +


    +
  8. What does the following message mean during +compilation of Zoltan?
    +Makefile:28: mem.d: No such file or directory +

    In the old "manual" build system for Zoltan, dependency files were
generated for each source file filename.c. The first time Zoltan was built
for a given platform, the dependency files did not exist; after producing
this warning, gmake created the dependency files it needed and continued
compilation.

    +Newer versions of Zoltan use autotools or cmake for builds and, thus, do +not produce this warning. +

+ +

+


+Updated: August 2, 2010 + + + +
+Copyright (c) 2000-2012, Sandia National Laboratories.
+
+ +
+ +
+ + + + + + + + + + + + + + + diff --git a/Zoltan-3.90/doc/Zoltan_html/Zoltan_bugreport.html b/Zoltan-3.90/doc/Zoltan_html/Zoltan_bugreport.html new file mode 100644 index 00000000..778c79c3 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/Zoltan_bugreport.html @@ -0,0 +1,338 @@ + + + + + + + + + + + + + + + Zoltan + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +Zoltan: + +
+ +Data-Management Services for Parallel Applications + +

+ + + + + + + +Reporting Zoltan Bugs + +


+ +The Zoltan team uses Trilinos' Github to +collect bug reports. +In your bug reports, please mention "@trilinos/zoltan" so that your bug report +is sent to Zoltan developers. +

+ + +


+ +
+ +
+ + + + + + + + + + + + + + + diff --git a/Zoltan-3.90/doc/Zoltan_html/Zoltan_cite.html b/Zoltan-3.90/doc/Zoltan_html/Zoltan_cite.html new file mode 100644 index 00000000..5849d942 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/Zoltan_cite.html @@ -0,0 +1,437 @@ + + + + + + + + + + + + + + + Zoltan + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +Zoltan: + +
+ +Data-Management Services for Parallel Applications + +

+ + + + + + + +How to Cite Zoltan +
+


+ + +

+Please use (at least) one of the following references when you cite Zoltan: + +

+ +@Article{ZoltanIsorropiaOverview2012,
+   author = {E. G. Boman and U. V. Catalyurek and C. Chevalier and K. D. Devine},
+   title = {The {Z}oltan and {I}sorropia Parallel Toolkits for Combinatorial Scientific Computing: Partitioning, Ordering, and Coloring},
+   journal = {Scientific Programming},
+   year = {2012},
+   volume = {20},
+   number = {2},
+   pages = {129--150}
+}
+
+ +

+ +@Article{ZoltanOverviewArticle2002,
+   author = {Karen Devine and Erik Boman and Robert Heaphy and + Bruce Hendrickson and Courtenay Vaughan},
+   title = {{Zoltan} Data Management Services for Parallel + Dynamic Applications},
+   journal = {Computing in Science and Engineering},
+   year = {2002},
+   volume = {4},
+   number = {2},
+   pages = {90--97}
+}
+
+ +

+ +@Unpublished{ZoltanHomePage,
+   author = {Erik Boman and Karen Devine and Lee Ann Fisk and + Robert Heaphy and Bruce Hendrickson and Vitus Leung + and Courtenay Vaughan and Umit Catalyurek and + Doruk Bozdag and William Mitchell},
+   title = {{Zoltan} home page},
+   note = {\url{http://cs.sandia.gov/Zoltan}},
   year = {1999}
}
+
+ +

+ +@Manual{ZoltanUsersGuideV3,
+   title = {{Zoltan 3.0}: Parallel Partitioning, Load-balancing, and Data Management Services; User's Guide},
+   author = {Erik Boman and Karen Devine and Lee Ann Fisk and + Robert Heaphy and Bruce Hendrickson and + Courtenay Vaughan and Umit Catalyurek and + Doruk Bozdag and William Mitchell and James Teresco},
+   organization = {Sandia National Laboratories},
+   address = {Albuquerque, NM},
+   year = {2007},
+   note = {Tech. Report SAND2007-4748W + \url{http://cs.sandia.gov/Zoltan/ug_html/ug.html}}
+}
+
+ +

+ +@Manual{ZoltanDevelopersGuideV3,
+   title = {{Zoltan 3.0}: Parallel Partitioning, Load-balancing, and Data Management Services; Developer's Guide},
+   author = {Erik Boman and Karen Devine and Lee Ann Fisk and + Robert Heaphy and Bruce Hendrickson and + Courtenay Vaughan and Umit Catalyurek and + Doruk Bozdag and William Mitchell and James Teresco},
+   organization = {Sandia National Laboratories},
+   address = {Albuquerque, NM},
+   year = {2007},
+   note = {Tech. Report SAND2007-4749W, + \url{http://cs.sandia.gov/Zoltan/dev_html/dev.html}}
+}
+
+ +

+ +@Inproceedings{ZoltanHypergraphIPDPS06,
+   title = {Parallel Hypergraph Partitioning for + Scientific Computing},
+   author = {Karen D. Devine and Erik G. Boman and + Robert T. Heaphy and Rob H. Bisseling + and Umit V. Catalyurek},
   booktitle = {IPDPS'06},
+   publisher = {IEEE},
+   year = {2006}
+}
+
+ +

+ +@Inproceedings{ZoltanParHypRepart07,
+   title = {Hypergraph-based Dynamic Load Balancing for Adaptive + Scientific Computations},
+   author = {U.V. Catalyurek and E.G. Boman and K.D. Devine + and D. Bozdag and R.T. Heaphy and L.A. Riesen},
+   booktitle = {Proc. of 21st International Parallel and + Distributed Processing Symposium (IPDPS'07)},
+   publisher = {IEEE},
+   year = {2007},
+   note = {Best Algorithms Paper Award.}
+}
+
+

+ +


+
+ +
+ + + + + + + + + + + + + + + diff --git a/Zoltan-3.90/doc/Zoltan_html/Zoltan_construction.html b/Zoltan-3.90/doc/Zoltan_html/Zoltan_construction.html new file mode 100644 index 00000000..497ad752 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/Zoltan_construction.html @@ -0,0 +1,344 @@ + + + + + + + + + + + + + + + Zoltan + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +Zoltan: + +
+ +Data-Management Services for Parallel Applications + +

+ + + + + + +


+

+ The Zoltan Team
+ Sandia National Laboratories:
+Erik Boman
+Karen Devine
+Lee Ann Fisk
+Robert Heaphy
+Bruce Hendrickson
+Courtenay Vaughan
+
+ Ohio State University
+Umit Catalyurek
+Doruk Bozdag
+
+National Institute of Standards and Technology
+William F. Mitchell
+

+

+ + +


+ +Zoltan is temporarily unavailable in preparation for a new release. +
+We expect the new release to be completed by October 13, 2006. +If you would like an email notification of the new release, please +join the zoltan-announce mailing list at +http://software.sandia.gov/mailman/listinfo/zoltan-announce. +

+Thank you for your patience. +

+ +For more information, contact: +

+
+ +
+ + + + + + + + + + + + + + + diff --git a/Zoltan-3.90/doc/Zoltan_html/Zoltan_download.html b/Zoltan-3.90/doc/Zoltan_html/Zoltan_download.html new file mode 100644 index 00000000..6c723c53 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/Zoltan_download.html @@ -0,0 +1,241 @@ + + + + + + + + + + + + + + + Zoltan Download Page + + + + + + + + + + + + + + + +
+Zoltan Download Page +

+ +


+ +

NOTE: If you have not registered yet, please take a moment +to do so at the +Download Registration Form (Note: this information is only used to +provide usage statistics to our program sponsors).

+ +


+ +

Join the Mailing Lists

+ +If you haven't done so already, +we recommend you subscribe to these mailing lists: +
    +
  • zoltan-announce is used for announcement of Zoltan news, such as new releases. +
  • zoltan-users is a community forum for discussion of issues related to Zoltan. +
+

+ +


+

+ +

Zoltan Releases for Download

+ +The most current version of Zoltan is always available in the +Trilinos framework. +You can download +Trilinos here. +In the Trilinos directory, the Zoltan code is in directory +Trilinos/packages/zoltan. In this directory, you can continue to +build Zoltan separately from Trilinos. +

+ +Stand-alone versions of Zoltan are available below. +We recommend the latest version. +For more details, see the +Zoltan release notes. + +

    +
  • Version 3.83 (1/28/16; as in Trilinos v12.6; aaf328db7e43001ee2d3148f72f12147e51c3293)
  • + +
  • Version 2.1 (10/05/06)
  • +
+

+ +Zoltan is provided as a compressed tar file. Unpack with 'tar xfz zoltan*.tar.gz'. + +


+ +

Zoltan Tutorial

+The Zoltan Tutorial presented at SciDAC2007 is now on-line. Download it here: + + +
+

Last Updated: September 23, 2008 + + + +
+ + + + + + + + + + + + + + + + diff --git a/Zoltan-3.90/doc/Zoltan_html/Zoltan_phil.html b/Zoltan-3.90/doc/Zoltan_html/Zoltan_phil.html new file mode 100644 index 00000000..29434a78 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/Zoltan_phil.html @@ -0,0 +1,735 @@ + + + + + + + + + + + + + + + Zoltan + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +Zoltan: + +
+ +Data-Management Services for Parallel Applications + +

+ + + + + + + +Project Description +
+


+ + +
Overview

+The Zoltan library is a collection of data management services for parallel, +unstructured, adaptive, and dynamic applications. It simplifies the +load-balancing, data movement, unstructured communication, and memory usage +difficulties that arise in dynamic applications such as adaptive +finite-element methods, particle methods, and crash simulations. +Zoltan's data-structure neutral design also lets a wide range of applications +use it without imposing restrictions on application data structures. +Its object-based interface provides a simple and inexpensive way for +application developers to use the library and researchers to make new +capabilities available under a common interface. +

+Zoltan provides tools that help developers of parallel applications. +

+These tools are provided in an easy-to-use toolkit that is callable from C, +C++, and Fortran90. + + + +
Design of Toolkits and Libraries

+Using general-purpose libraries allows +algorithms to be shared among and compared within many applications. The +close dependence of libraries on application data, however, +requires careful design to maintain separation between the +libraries and application data structures. +

One way to provide this separation is to use object-based software design.
Instead of requiring the application to build the data structures required by
the library, the application passes the library functions that access the
application's own data structures. For example, rather than require an
application to build a complicated graph description, the library can require
the application to provide a function returning graph vertices and a function
returning edge connectivity for a given vertex. Using these functions, the
library can build the data structures it needs.

+ +This object-based design has a number of advantages. +

    +
  • +Changes in the library's data structures need not +propagate back to the application. As long as the set of required functions +does not change, the application does not need to change to use new versions +of the library. +
  • +
  • +Once the set of required functions is implemented, the application can use all +the algorithms in the library. +
  • + +
  • +The required functions are generally easy for +an application to implement, as most applications need to +access their data objects and the interactions between objects +for their own computations. +
  • +
  • +Memory usage is lower as +an application does not have to build an intermediate data structure +that is later converted to appropriate data structures for the library. +
  • +
  • +The constructor for library data structures is called only when it +is needed, and only the data needed for a particular algorithm is obtained. +
  • +
+

+There are a few disadvantages to this object-based approach as well. +

    +
  • +Additional overhead is incurred as the library calls the functions to +build its data structures. +In experiments, however, this cost has been very low +relative to the cost of actual computation in the library. +
  • +
  • A general-purpose tool can provide only limited support for manipulation
of application data structures (e.g., data movement).
  • +
+

+For more detailed information, see +[Hendrickson +and Devine]. +

+ + +


Zoltan's Design

+We have chosen an object-based, callback function design. An application +provides a number of simple callback functions that access the application +data structures. Zoltan then calls these functions to obtain data it needs. +Geometric algorithms are +supported via callback functions returning objects to be balanced and the +weights and coordinates of those objects. +Graph-based algorithms are +supported by callback functions returning objects to be +balanced, edges between objects, and object and edge weights. +For refinement-tree algorithms, additional callback functions return +parent-child relationships. +

+Support for data migration (the movement of data to establish a new +decomposition) is also provided through a similar callback +function interface. An application provides callback functions that pack +object data into and unpack data from communication buffers provided by +Zoltan. Zoltan calls the packing function to load communication buffers, +performs the communication necessary to move the data, and calls the unpacking +function to unload the communication buffers. + +
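To make this callback design concrete, the following minimal C sketch registers a few query functions and requests a new decomposition with RCB. It is only an illustrative sketch, not code from the Zoltan distribution: the MyObjects structure, the my_* callbacks, and the rebalance routine are invented for this example, the default values of NUM_LID_ENTRIES and related parameters are assumed, and error checking is omitted; the User's Guide gives the authoritative interface descriptions.

#include <mpi.h>
#include "zoltan.h"

/* Hypothetical application data: each process owns num_mine objects with a
   global ID, a weight, and 3D coordinates.                                 */
typedef struct {
  int num_mine;
  ZOLTAN_ID_TYPE *gids;   /* num_mine global IDs         */
  float *wgts;            /* num_mine object weights     */
  double *coords;         /* 3 * num_mine coordinates    */
} MyObjects;

static int my_num_obj(void *data, int *ierr) {
  *ierr = ZOLTAN_OK;
  return ((MyObjects *) data)->num_mine;
}

static void my_obj_list(void *data, int num_gid_entries, int num_lid_entries,
                        ZOLTAN_ID_PTR gids, ZOLTAN_ID_PTR lids,
                        int wgt_dim, float *obj_wgts, int *ierr) {
  MyObjects *m = (MyObjects *) data;
  int i;
  for (i = 0; i < m->num_mine; i++) {
    gids[i * num_gid_entries] = m->gids[i];              /* global ID   */
    if (num_lid_entries) lids[i * num_lid_entries] = i;  /* local index */
    if (wgt_dim)         obj_wgts[i * wgt_dim] = m->wgts[i];
  }
  *ierr = ZOLTAN_OK;
}

static int my_num_geom(void *data, int *ierr) {
  *ierr = ZOLTAN_OK;
  return 3;                               /* objects have 3D coordinates */
}

static void my_geom_multi(void *data, int num_gid_entries, int num_lid_entries,
                          int num_obj, ZOLTAN_ID_PTR gids, ZOLTAN_ID_PTR lids,
                          int num_dim, double *geom_vec, int *ierr) {
  /* Assumes the default NUM_LID_ENTRIES = 1, so local IDs are available. */
  MyObjects *m = (MyObjects *) data;
  int i, d;
  for (i = 0; i < num_obj; i++)
    for (d = 0; d < num_dim; d++)
      geom_vec[i * num_dim + d] = m->coords[3 * lids[i * num_lid_entries] + d];
  *ierr = ZOLTAN_OK;
}

/* Assumes MPI_Init and Zoltan_Initialize have already been called. */
void rebalance(MyObjects *objs) {
  struct Zoltan_Struct *zz = Zoltan_Create(MPI_COMM_WORLD);
  int changes, num_gid, num_lid, num_imp, num_exp;
  ZOLTAN_ID_PTR imp_gids, imp_lids, exp_gids, exp_lids;
  int *imp_procs, *imp_parts, *exp_procs, *exp_parts;

  /* Register the callbacks through which Zoltan sees the application data. */
  Zoltan_Set_Num_Obj_Fn(zz, my_num_obj, objs);
  Zoltan_Set_Obj_List_Fn(zz, my_obj_list, objs);
  Zoltan_Set_Num_Geom_Fn(zz, my_num_geom, objs);
  Zoltan_Set_Geom_Multi_Fn(zz, my_geom_multi, objs);

  /* Switching to another method later requires changing only this string. */
  Zoltan_Set_Param(zz, "LB_METHOD", "RCB");
  Zoltan_Set_Param(zz, "OBJ_WEIGHT_DIM", "1");

  Zoltan_LB_Partition(zz, &changes, &num_gid, &num_lid,
                      &num_imp, &imp_gids, &imp_lids, &imp_procs, &imp_parts,
                      &num_exp, &exp_gids, &exp_lids, &exp_procs, &exp_parts);

  /* ... move the exported objects (or let Zoltan do so through the packing
         and unpacking callbacks described above) ...                       */

  Zoltan_LB_Free_Part(&imp_gids, &imp_lids, &imp_procs, &imp_parts);
  Zoltan_LB_Free_Part(&exp_gids, &exp_lids, &exp_procs, &exp_parts);
  Zoltan_Destroy(&zz);
}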


Zoltan Examples

+Several examples of Zoltan's use can be found in the +Zoltan User's Guide. +

+

+ +


Typical Approach to Dynamic Load Balancing

+Dynamic load balancing has been used in many applications, ranging from +adaptive mesh refinement to particle methods to contact detection algorithms. +In most applications using dynamic load balancing, the load-balancing +algorithm is implemented directly in the application, with close coupling +between the application's and load-balancing algorithm's data structures. +This typical approach has two disadvantages. +

    +
  • It is possible that the application developer did not select the +best algorithm for the application, but the developer is unable to compare the +algorithm with others without taking time to implement many algorithms in the +application. +
  • +
  • The close coupling of the algorithm's and application's +data structures limits the algorithm's use to a single application. +Developers wanting to use the algorithm in a new application have to re-write +the algorithm using the new application's data structures. +
  • +
+As a result, research into and use of dynamic load-balancing algorithms are +severely impaired. +

+ + + +


Why Dynamic Load Balancing is Harder than Static Partitioning

+Many high-quality static partitioning tools exist; examples include +Chaco, +METIS, + +and +SCOTCH. +General-purpose dynamic load-balancing tools are less common, however, +since they are more difficult to implement. The difficulty arises from +fundamental algorithmic and software-engineering +differences between static and dynamic partitioning. These differences are +summarized in the following table. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Static Partitioning...Dynamic Load Balancing...
Generally used as a pre-processor to an application.Runs side-by-side with an application.
Can be (and usually is) implemented serially.Must be implemented in parallel.
Has only modest concern for execution time.Must run quickly (time to load balance should not exceed time to run in + an unbalanced state).
Has only modest concern for memory usage.Must use little memory (should not affect scalability of + application).
Can use file-based interfaces (read geometry from a file; write partition +info to a file).Must use function-call interfaces.
Has no dependence on an application's data structures.Needs information stored in an application's data structures.
Accounts for part sizes and communication costs.Accounts for part sizes, communication costs, and data movement +costs.
+

+ +


Zoltan's Load-Balancing Suite +

In our experience, no single partitioning strategy is effective for all
parallel computations. Some applications require partitions based only on the
problem's workloads and geometry; others benefit from explicit consideration
of dependencies between data. Some applications require the highest quality
partitions possible, regardless of the cost to generate them; others can
sacrifice some quality so long as new partitions can be generated quickly.
For some applications, the cost to relocate data is prohibitively high, so
incremental partitioning algorithms are needed; other applications can
tolerate greater remapping costs. Most important, application developers
might not know in advance which strategies work best in their applications,
so they need a convenient means of comparing algorithms.

We provide two classes of parallel partitioning algorithms in the Zoltan
library: geometric (coordinate-based) methods, such as RCB, RIB and HSFC,
and combinatorial methods based on graphs and hypergraphs.

+ +Once the Zoltan callback functions are implemented, an application can switch +between partitioning algorithms by changing only the +LB_METHOD parameter +through a call to +Zoltan_Set_Param. +Thus, comparing different algorithms within a single application is easy, +enabling users to try several algorithms and find +the best ones for their applications. +

+ +


Data Migration Tools +

+A complicated part of dynamic repartitioning is the need to move data from old +processors to new ones. This data migration requires deletions and insertions +from the application data structures as well as communication between the +processors. +

+To help an application with +data migration, Zoltan requires an application to +supply callback functions that pack data into communication buffers and unpack +data from communication buffers. Zoltan +calls the packing function to load communication buffers with objects to be +exported, performs all communication needed to move the data, and calls the +unpacking function to load the objects in the data structures on the new +processors. This mechanism eliminates the need for the application developer +to implement complicated communication for data migration. +

+ +


Unstructured Communication Library +

+Unlike static applications, where communication patterns remain fixed +throughout the computation, dynamic applications can have complicated, +changing communication patterns. For example: +

    +
  • After adaptive mesh refinement, +new communication patterns must reflect dependencies between newly created +elements. +
  • +
  • +Multiphysics simulations, such as crash simulations, might require complicated +communication to transfer data between decompositions for different simulation +phases. +
  • +
+Zoltan provides an +unstructured communication package +to simplify +communication. The package builds a communication plan, including information +about both message sends and receives for a given processor. The plan can be +reused throughout the application, or destroyed and rebuilt when communication +patterns change. The package also includes simple communication primitives that +insulate the user from details of message sends and receives. +

+ +


Distributed Data Directories +

+Dynamic applications often need to locate off-processor information. After +repartitioning, for example, a processor might need to rebuild ghost cells and +lists of data to be communicated. It might know which data it needs, but not +where the data are located. +

+To help locate off-processor data, Zoltan includes a +distributed data +directory tool that is scalable with respect to both memory usage and +computation time. Processors register their owned objects with the directory. +Then, through a rendezvous algorithm, other processors can look up the +location of data they need. +

+ +


Memory Management Tools +

Dynamic applications rely heavily on the ability to allocate and free memory
as needed. Memory leaks and invalid memory accesses are common during
software development. Although many software development tools let users
track memory bugs, these tools are often not available on state-of-the-art
parallel computers.

+Zoltan's memory management package +provides simple in-application debugging +tools that are beneficial on state-of-the-art computing platforms. The +package includes wrappers around malloc and free that record the location of +all memory +allocation operations. Thus, tracking memory leaks is simplified, as +source-code locations of unfreed-memory allocations can be printed. +Statistics about memory allocations and frees are also available. +

+ + +

+ +
+ + + + + + + + + + + + + + + diff --git a/Zoltan-3.90/doc/Zoltan_html/Zoltan_pubs.html b/Zoltan-3.90/doc/Zoltan_html/Zoltan_pubs.html new file mode 100644 index 00000000..d036ad4e --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/Zoltan_pubs.html @@ -0,0 +1,662 @@ + + + + + + + + + + + + + + + Zoltan + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +Zoltan: + +
+ +Data-Management Services for Parallel Applications + +

+ + + + + + + +Papers and Presentations + +


+ +This list may not be up-to-date... + +

+ Hypergraph Partitioning +

    +
  • +Hypergraph-based Dynamic Load Balancing for Adaptive Scientific Computations.
    +U. Catalyurek, E. Boman, K. Devine, D. Bozdag, R. Heaphy, L.A. Riesen.
    + Proceedings of IPDPS'07, Best Algorithms Paper Award, March 2007. +
    +Abstract (HTML) +     +Paper (PDF) +     +Citation (BIBTEX) +
  • +

    +

  • +A Repartitioning Hypergraph Model for Dynamic Load Balancing
    +U. Catalyurek, E. Boman, K. Devine, D. Bozdag, R. Heaphy, L.A. Riesen.
    +Journal of Parallel and Distributed Computing, +Vol. 69, No. 8, pp. 711-724, Aug 2009. +
    +Abstract (HTML) +     +Paper (PDF) +     +Citation (BIBTEX) +
  • +

    +

  • +Hypergraph-based Dynamic Partitioning and Load Balancing.
    +U. Catalyurek, D. Bozdag, E. Boman, K. Devine, R. Heaphy, and L.A. Riesen.
    +Advanced Computational Infrastructures for +Parallel/Distributed Adaptive Applications, M. Parashar, ed., Wiley Publishing, Dec. 2009. +
    + +Citation (BIBTEX) +
  • +

    +

  • +Parallel Hypergraph Partitioning for Scientific Computing.
    +K. Devine, E. Boman, R. Heaphy, R. Bisseling, U. Catalyurek.
    + Proceedings of IPDPS 2006, April 2006. +
    +Abstract (HTML) +     +Paper (PDF) +     +Citation (BIBTEX) +
  • +

    +

  • +Parallel Hypergraph Partitioning for Irregular Problems.
    +K. Devine, E. Boman, R. Heaphy, U. Catalyurek, R. Bisseling.
    +SIAM Parallel Processing for Scientific Computing, February 2006. +
    +Viewgraphs (PDF) +
  • +
+ +Zoltan +
    + +
  • +The Zoltan and Isorropia Parallel Toolkits for Combinatorial Scientific Computing: Partitioning, Ordering, and Coloring +
    E.G. Boman, U.V. Catalyurek, C. Chevalier, and K.D. Devine, Scientific Programming vol. 20, no. 2, 2012, special issue on Trilinos. +

    +
  • +Getting Started with Zoltan: A Short Tutorial.
    +K. Devine, E. Boman, L.A. Riesen, U. Catalyurek and C. Chevalier.
    +Proc. 2009 Dagstuhl Seminar on Combinatorial Scientific Computing, +February, 2009.
    +PDF +        +Citation (BIBTEX) +        +Viewgraphs from the tutorial (PDF) +

    +
  • + +
  • +Zoltan Data Management Services for Parallel Dynamic Applications.
    +K. Devine, E. Boman, R. Heaphy, B. Hendrickson, and C. Vaughan.
    +Computing in Science and Engineering, Vol. 4, No. 2, March/April 2002, pp. 90-97. +
    +Paper (PDF) +     +Citation (BIBTEX) +
    +

    + +

  • +Zoltan v3: Parallel Partitioning, Load Balancing and Data-Management Services, User's Guide +.
    +E. Boman, K. Devine, R. Heaphy, B. Hendrickson, V. Leung, L.A. Riesen, C. Vaughan, U. Catalyurek, D. Bozdag, W. Mitchell and J. Teresco.
    +Sandia National Laboratories Tech. Rep. SAND2007-4748W, Albuquerque, NM, 2007. +
    +Manual (HTML) +     +Citation (BIBTEX) +
    +

    + +

  • +Zoltan v3: Parallel Partitioning, Load Balancing and Data-Management Services, Developer's Guide +.
    +E. Boman, K. Devine, R. Heaphy, B. Hendrickson, V. Leung, L.A. Riesen, C. Vaughan, U. Catalyurek, D. Bozdag, and W. Mitchell.
    +Sandia National Laboratories Tech. Rep. SAND2007-4749W, Albuquerque, NM, 2007. +
    + +Citation (BIBTEX) +
    +

    +

  • +Tinkertoy Parallel Computing: A Case Study With Zoltan.
    +K. Devine and B. Hendrickson.
    +Int. J. Computational Science and Engineering, 2005. +
    +Abstract (HTML) +     +Paper (PDF) +     +Citation (BIBTEX) +
  • +

    + +

  • +Design of Dynamic Load-Balancing Tools for Parallel Applications.
    +K. Devine, B. Hendrickson, E. Boman, M. St.John, and C. Vaughan.
    +Proceedings of the International Conference on Supercomputing, +Santa Fe, May, 2000.
    +Abstract (HTML) +     + +Citation (BIBTEX) +
    +

    + +

+Dynamic Load Balancing +
+
    +
  • +Partitioning and Load Balancing for Emerging Parallel Applications and +Architectures.
    +K. Devine, E. Boman, and G. Karypis.
    +Chapter in Parallel Processing for Scientific Computing, + Heroux, Raghavan, Simon, eds. SIAM (2006) 99-126. +
    + +Citation (BIBTEX) +
  • +

    +

  • +New Challenges in Dynamic Load Balancing.
    +K. Devine, E. Boman, R. Heaphy, B. Hendrickson, J. Teresco, J. Faik, +J. Flaherty, L. Gervasio.
    + Applied Numerical Mathematics, Vol. 52, Issues 2-3, pp. 133-152, 2005. +
  • +

    +

  • +Partitioning and Dynamic Load Balancing for the Numerical Solution of +Partial Differential Equations.
    +J. Teresco, K. Devine, J. Flaherty.
    Chapter in Numerical Solution of Partial Differential Equations on
Parallel Computers, Bruaset, Bjørstad, Tveito, editors. © Springer-Verlag, 2005.
  • +

    +

  • +A model for resource-aware load balancing on heterogeneous clusters.
    +J. Faik, J. Flaherty, L. Gervasio, J. Teresco, K. Devine.
    +Williams College Department of Computer Science Technical Report CS-05-01, 2005. +
  • +

    +

  • +Dynamic Load Balancing in Computational Mechanics.
    +B. Hendrickson and K. Devine.
    +Comput. Methods Appl. Mech. Engrg., 184 (2000), 485-500. +
    +Abstract (HTML) +     +Paper (PS.GZ) +
    +

    + +

  • +Graph Partitioning Models for Parallel Computing.
    +Bruce Hendrickson and Tamara G. Kolda.
    +Parallel Computing, 26 (2000), 1519-1534.
    +Abstract (HTML) +     +Paper (PS.GZ) +
    +

    + +

  • Load Balancing Fictions, Falsehoods and Fallacies.
    +Bruce Hendrickson.
    +Applied Mathematical Modelling, 25:99-108, 2000.
    + +Abstract (HTML) +     +Paper (PS.GZ) +     +HTML version of overheads from plenary talk at the 3rd DRAMA Steering Workshop, +September, 1999.
    +

    + +

  • +Interprocessor Communication with Memory Constraints.
    +Ali Pinar and Bruce Hendrickson.
    +Proc. 12th ACM Symp. Parallel Algorithms and Architectures, +July 2000.
    +Abstract (HTML) +     +Paper (PS.GZ) +

    + +

  • +Graph Partitioning and Parallel Solvers: Has the Emperor No Clothes + (Extended Abstract).
    +Bruce Hendrickson.
    +Proc. Irregular'98, + Lecture Notes in +Computer Science, 1457, pp. 218-225, 1998. Copyright Springer-Verlag.
    +Abstract (HTML) +     +Paper (PS.GZ) +
    +

    + +

  • +Parallel Adaptive hp-Refinement Techniques for Conservation Laws.
    +K. Devine and J. Flaherty.
    +Applied Numerical Mathematics, 20 (1996), 367-386. +
    +Abstract (HTML) +     +Paper (PS.GZ) +
    +

    + +

  • +Dynamic Load Balancing for Parallel Finite Element Methods +with Adaptive h- and p-Refinement.
    +K. Devine and J. Flaherty.
    +Proceedings of the Seventh SIAM Conference on +Parallel Processing for Scientific Computing, +San Francisco, CA, February, 1995.
    +Abstract (HTML) +     +Paper (PS.GZ) +
    +

    + +

  • +Parallel Algorithms for Dynamically Partitioning Unstructured Grids.
    +Pedro Diniz, Steve Plimpton, Bruce Hendrickson and Robert Leland.
    +Proceedings of the Seventh SIAM Conference on Parallel +Processing for Scientific Computing, San Francisco, CA, February, 1995.
    +Abstract (HTML) +     +Paper (PS.GZ) +
    +

    + + +

+ +

+ +Coloring + +

+

    +
  • +Improving Graph Coloring on Distributed Memory Parallel Computers.
    +A.E. Sariyüce, E. Saule and Ü.V. Çatalyürek.
    +Proceedings of 18th Annual International Conference on High Performance Computing (HiPC 2011), +Dec 2011, to appear. +

    +

  • +
  • +Distributed-memory Parallel Algorithms for Distance-2 Coloring and Related Problems in Derivative Computation.
    +D. Bozdag, Ü.V. Çatalyürek, A. H. Gebremedhin, F. Manne, E.G. Boman, and F. Özgüner.
    +SIAM Journal of Scientific Computing, Vol. 32, No. 4, pp. 2418-2446, 2010.

    +

  • +
  • +A Framework for Scalable Greedy Coloring on Distributed Memory Parallel Computers.
    +D. Bozdag, A. Gebremedhin, F. Manne, E.G. Boman, and Ü.V. Çatalyürek.
    +Journal of Parallel and Distributed Computing, Vol. 68, No. 4, pp. 515-535, Apr 2008.

    +

  • +
  • +A Scalable Parallel Graph Coloring Algorithm for Distributed-Memory +Computers.
    +E. Boman, D. Bozdag, U. Catalyurek, A. Gebremedhim, F. Manne.
    +EuroPar 2005, August 2005. +
  • +

    +

  • +A Parallel Distance-2 graph coloring algorithm for distributed memory +computers.
    +E. Boman, D. Bozdag, U. Catalyurek, A. Gebremedhin, F. Manne, F. Ozguner.
    +HPCC-05, September 2005. +
  • + +
+ + +Miscellaneous +
    +
  • +Combinatorial Algorithms Enabling Computational Science: Tales From the +Front.
    +S. Bhowmick, E. Boman, K. Devine, A. Gebremedhin, B. Hendrickson, P. Hovland, T. Munson, A. Pothen.
    +Sandia National Laboratories Tech. Rep. SAND2006-3914C, Albuquerque, NM, 2006. +Submitted to Journal of Physics: Conference Series, July 2006. +
  • +

    + +

+
+ +
+ + + + + + + + + + + + + + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/brack3d.png b/Zoltan-3.90/doc/Zoltan_html/dev_html/brack3d.png new file mode 100644 index 00000000..a0d7e74a Binary files /dev/null and b/Zoltan-3.90/doc/Zoltan_html/dev_html/brack3d.png differ diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev.html new file mode 100644 index 00000000..97a0b760 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev.html @@ -0,0 +1,409 @@ + + + + + + + + + + + + + + + Zoltan Developer's Guide + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +Zoltan: + +
+ +Parallel Partitioning, Load Balancing and +Data-Management Services + +

+ + + + + + + +

Developer's +Guide  +

+


+

+


+

+

The Zoltan Team

+ + + + + +
+ Sandia National Laboratories
+Erik Boman
+Cedric Chevalier
+Karen Devine
+Vitus Leung
+Sivasankaran Rajamanickam
+Lee Ann Riesen
+Michael Wolf
+
+ Ohio State University
+Umit Catalyurek
+Doruk Bozdag
+
+
+

Past Zoltan Contributors

+ + + + + + + + +
+ Sandia National Laboratories:
+Robert Heaphy
+Bruce Hendrickson
+Matthew St. John
+Courtenay Vaughan
+
+
+ Williams College
+James Teresco
+
+ National Institute of Standards and Technology
+William F. Mitchell
+
+ Rensselaer Polytechnic Institute
+Jamal Faik
+Luis Gervasio
+
+ +

+

+


+
+Zoltan Developer's Guide, Version 3.3
+ +

+

+

+ +The Zoltan Developer's Guide is undergoing major revision and will be re-released with Zoltan 4.0. +Please excuse the inconvenience. + +
+ + +
+Copyright (c) 2000-2012, Sandia National Laboratories.
+
+ +
+ + + + + + + + + + + + + + + + + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/devOLD.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/devOLD.html new file mode 100644 index 00000000..6a5edbb3 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/devOLD.html @@ -0,0 +1,506 @@ + + + + + + + + + + + + + + + Zoltan Developer's Guide + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +Zoltan: + +
+ +Parallel Partitioning, Load Balancing and +Data-Management Services + +

+ + + + + + + +

Developer's +Guide  +

+


+

+


+

+

The Zoltan Team

+ + + + + +
+ Sandia National Laboratories
+Erik Boman
+Cedric Chevalier
+Karen Devine
+Vitus Leung
+Lee Ann Riesen
+
+ Ohio State University
+Umit Catalyurek
+Doruk Bozdag
+
+
+

Past Zoltan Contributors

+ + + + + + + + +
+ Sandia National Laboratories:
+Robert Heaphy
+Bruce Hendrickson
+Matthew St. John
+Courtenay Vaughan
+
+
+ Williams College
+James Teresco
+
+ National Institute of Standards and Technology
+William F. Mitchell
+
+ Rensselaer Polytechnic Institute
+Jamal Faik
+Luis Gervasio
+
+ +

+

+


+
+Zoltan Developer's Guide, Version 3.2
+ +

+

+
+DOWNLOAD PDF VERSION HERE. +
+

+ + +

+Introduction and General Principles

+ +
Philosophy of Zoltan +
Coding Principles in Zoltan +
    Include files +
    Global Variables +
    Function Names +
    Parallel Communication +
    Memory Management +
    Errors, Warnings and Return Codes +
+Zoltan Quality Assurance +
+ +

+Zoltan Distribution

+ +
    CVS +
    Layout of Directories +
    Compilation and Makefiles
+ +

+Zoltan Interface and Data Structures

+ +
    Interface Functions +
    ID Data Types +
    Data Structures
+ +

+Services (to simplify new algorithm development)

+ +
    Parameter Setting Routines +
    Parallel Computing Routines +
    Common Functions for Querying Applications +
    Hash Function +
    Timing Routines +
    High-Level Timing Services: ZOLTAN_TIMER +
    Debugging Services
+ +

+Adding New Load-Balancing Algorithms to Zoltan

+ +
    Load-Balancing Interface Routines +
    Load-Balancing Function Implementation +
    Data Structures +
    Memory Management +
    Parameters +
    Part Remapping +
+ +

+Migration Tools

+ +

+FORTRAN Interface

+ +

+C++ Interface

+ +

+References

+ +

+Appendix: Using the Test Drivers zdrive, zCPPdrive and zfdrive

+ + + +

+Appendix: Visualization of Geometric Partitions

+ + + +

+Appendix: Using the Test Script test_zoltan

+ +

+Appendix: Recursive Coordinate Bisection (RCB)

+ +

+Appendix: Recursive Inertial Bisection (RIB)

+ +

+Appendix: Graph Partitioning (ParMETIS and Jostle)

+ +

+Appendix: Hypergraph Partitioning (PHG)

+ +

+Appendix: Refinement Tree

+ +

Appendix: Hilbert Space-Filling Curve (HSFC)

+ +

+Appendix: Handling Degenerate Geometries

+ +
+Copyright (c) 2000-2007, Sandia National Laboratories.
+
+
[Zoltan Home Page  |  Next:  +Introduction and General Principles
+ + + + + + + + + + + + + + + + + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add.html new file mode 100644 index 00000000..da445d75 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add.html @@ -0,0 +1,127 @@ + + + + + + + + + Zoltan Developer's Guide: Adding Algorithms + + + + + + +

+Adding New Load-Balancing Algorithms to Zoltan

+The Zoltan library is designed so that adding new load-balancing algorithms +to the library is simple. In many cases, existing code can be easily modified +to use the interface query functions to build the data structures needed +for the algorithm. The process for adding new algorithms to the library +is described below; more detail is provided at each link. +
    +
  1. Make sure you follow the Philosophy of Zoltan and the Coding Principles in Zoltan.
  2. Use the Data Structures provided by Zoltan.
  3. Implement a Load-Balancing Function front-end to the algorithm. Note that Zoltan load-balance methods should assign objects both to processors and parts, which may be different. The recommended strategy is to assign objects to parts first, then use Zoltan_LB_Part_To_Proc to generate the corresponding processors.
  4. Add the algorithm to the Load-Balancing Interface Routines.
  5. Add the Parameters needed by the algorithm. Also make sure that the algorithm uses the General Parameters in Zoltan properly, in particular Imbalance_Tol and Debug_Level.
  6. If necessary, write a routine to free your dynamically allocated data structures. See tips on memory management in Zoltan.
  7. If your algorithm uses persistent data structures, like the RCB tree with KEEP_CUTS, write a routine to copy your load-balancing data structure.
  8. We recommend you add part remapping to your algorithm using Zoltan_LB_Remap.
  9. Update the Fortran and C++ interfaces, if necessary.
  10. Document your new method. The documentation should be written in a format that can easily be converted into HTML and PDF. Consider adding a simple application to the examples directory demonstrating the use of your method.
  11. Please contact the Zoltan team if you would like your method to be distributed with future versions of Zoltan.
+ +
+
[Table of Contents  |  Next:  +Load-Balancing Interface Routines  |  Previous:  +Debugging Services  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add_interface.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add_interface.html new file mode 100644 index 00000000..640943fe --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add_interface.html @@ -0,0 +1,101 @@ + + + + + + + + + + Zoltan Developer's Guide: Adding Interface Routines + + + + + + +

+Load-Balancing Interface Routines

+Any new method that you wish to add to the Zoltan library must have an +interface that conforms to the prototype LB_FN. +Note that the load balancing +function may return either import lists, export lists, or both. All +processes must return the same type of list. If import (export) lists +are not computed, then the variable num_import (num_export) +must be set to a negative number (typically -1) upon return. +Full support of the RETURN_LISTS +parameter is not required. +If RETURN_LISTS +is not set to NONE, +the new algorithm may return either import or export lists; the Zoltan +interface will then build the lists requested by +RETURN_LISTS. + +

+A new algorithm must be added to the load-balancing interface for use +with parameter +LB_METHOD. +An entry for the new algorithm must be added to the enumerated type Zoltan_LB_Method +in lb/lb_const.h. An external LB_FN +prototype for the load-balancing function must also be added to lb/lb_const.h; +see the prototype for function Zoltan_RCB as an example. A character +string describing the new algorithm should be chosen to be used as the parameter +value for +LB_METHOD. +In +function Zoltan_LB_Set_LB_Method, +a test for this string should be added +and the Method and LB_Fn fields of the Zoltan_Struct +should be set to the new enumerated type value and new load-balancing function +pointer. +
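As a purely illustrative sketch of these registration steps, the fragments below show where a hypothetical method named "WIDGET" would be hooked in. Zoltan_Widget, the WIDGET enumerated value, and the "WIDGET" string are placeholders; the exact member names and surrounding code should be taken from lb/lb_const.h and Zoltan_LB_Set_LB_Method, not from this sketch.

/* In lb/lb_const.h: add a value to the enumerated type Zoltan_LB_Method
 * and an external prototype for the new load-balancing function:
 *
 *   enum Zoltan_LB_Method { ... , WIDGET, ... };
 *   extern ZOLTAN_LB_FN Zoltan_Widget;
 */

/* In Zoltan_LB_Set_LB_Method: recognize the new LB_METHOD string and set
 * the method and function-pointer fields (member paths per lb/lb_const.h). */
  else if (strcmp(method_name, "WIDGET") == 0) {
    zz->LB.Method = WIDGET;          /* new enumerated value            */
    zz->LB.LB_Fn  = Zoltan_Widget;   /* new load-balancing function     */
  }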
  +

+


+
[Table of Contents  |  Next:  +Load-Balancing Function Implementation  |  Previous:  +Adding New Algorithms  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add_lb.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add_lb.html new file mode 100644 index 00000000..73c016cc --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add_lb.html @@ -0,0 +1,259 @@ + + + + + + + + + + Zoltan Developer's Guide: Adding Load-Balancing Functions + + + + + + +

+Load-Balancing Function Implementation

+The new load-balancing algorithm should be implemented as a ZOLTAN_LB_FN. The type definition for a ZOLTAN_LB_FN is in lb/lb_const.h and is described below. When the new algorithm is selected, the LB_Fn field of the Zoltan_Struct is set to point to the ZOLTAN_LB_FN function for the new algorithm. This pointer is then used to invoke load balancing in Zoltan_LB_Partition.
  + +

+


+
typedef int ZOLTAN_LB_FN +(struct Zoltan_Struct *zz, +float *part_sizes, +int *num_import, +ZOLTAN_ID_PTR *import_global_ids, +ZOLTAN_ID_PTR *import_local_ids, +int **import_procs, +int **import_to_parts, +int *num_export, +ZOLTAN_ID_PTR *export_global_ids, +ZOLTAN_ID_PTR *export_local_ids, +int **export_procs,  +int **export_to_parts);  +
+
The ZOLTAN_LB_FN function type describes the arguments passed to a +load-balancing function. The input to the function is a Zoltan_Struct +containing pointers to application-registered functions to be used in the +load-balancing algorithm. The remaining arguments are output parameters +listing the objects to be imported or exported to the processor in the +new decomposition. The arrays for global and local IDs and source processors +must be allocated by the load-balancing function. The load-balancing function +may return either the import arrays, the export arrays, or both. If no +import data is returned, *num_import must be set to a negative number, +and similarly with *num_export. +Full support of the RETURN_LISTS +parameter is not required. +If RETURN_LISTS +is not set to NONE, +the new algorithm may return either import or export lists; the Zoltan +interface will then build the lists requested by +RETURN_LISTS. + +
  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments:
    zz:  A pointer to the Zoltan_Struct to be used in the load-balancing algorithm.
    part_sizes:  Input: an array of part sizes for each weight component. Entry part_sizes[i*obj_weight_dim+j] contains the user-requested part size for part i with respect to object weight j, for i=0,1,...,number of parts-1 and j=0,1,...,obj_weight_dim-1. If the application sets parameter OBJ_WEIGHT_DIM, obj_weight_dim is the set value of OBJ_WEIGHT_DIM; otherwise, obj_weight_dim is one.
    num_import:  Upon return, the number of objects to be imported to the processor for the new decomposition. A negative number indicates that no import data has been computed and the import arrays should be ignored.
    import_global_ids:  Upon return, an array of num_import global IDs of objects to be imported to the processor for the new decomposition. If this array is non-null, it must be allocated by Zoltan_Special_Malloc.
    import_local_ids:  Upon return, an array of num_import local IDs of objects to be imported to the processor for the new decomposition. If this array is non-null, it must be allocated by Zoltan_Special_Malloc.
    import_procs:  Upon return, an array of size num_import containing the processor IDs of processors owning (in the old decomposition) the objects to be imported for the new decomposition. If this array is non-null, it must be allocated by Zoltan_Special_Malloc.
    import_to_parts:  Upon return, an array of size num_import containing the part IDs of parts to which objects will be imported in the NEW decomposition. If this array is non-null, it must be allocated by Zoltan_Special_Malloc.
    num_export:  Upon return, the number of objects to be exported from the processor for the new decomposition. A negative number indicates that no export data has been computed and the export arrays should be ignored.
    export_global_ids:  Upon return, an array of num_export global IDs of objects to be exported from the processor for the new decomposition. If this array is non-null, it must be allocated by Zoltan_Special_Malloc.
    export_local_ids:  Upon return, an array of num_export local IDs of objects to be exported from the processor for the new decomposition. If this array is non-null, it must be allocated by Zoltan_Special_Malloc.
    export_procs:  Upon return, an array of size num_export containing the processor IDs of processors owning (in the old decomposition) the objects to be exported for the new decomposition. If this array is non-null, it must be allocated by Zoltan_Special_Malloc.
    export_to_parts:  Upon return, an array of size num_export containing the part IDs of parts to which the objects will be exported for the new decomposition. If this array is non-null, it must be allocated by Zoltan_Special_Malloc.
Returned Value:
    int:  Error code.
+ +
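For orientation, a minimal skeleton following the prototype above is sketched here. It is not a real Zoltan method: "Zoltan_Widget" is a placeholder name, the include file names simply follow the directory layout described in this guide, and the body computes no migration at all; it only shows how a method signals "export lists only" and "no import lists".

/* Minimal ZOLTAN_LB_FN skeleton (illustrative only). */
#include "zz_const.h"    /* struct Zoltan_Struct, ZOLTAN_ID_PTR, ZOLTAN_OK */
#include "lb_const.h"    /* ZOLTAN_LB_FN type definition */

int Zoltan_Widget(
  struct Zoltan_Struct *zz,         /* Zoltan structure with query functions  */
  float *part_sizes,                /* requested part sizes (see table above) */
  int *num_import,                  /* import-list outputs                    */
  ZOLTAN_ID_PTR *import_global_ids,
  ZOLTAN_ID_PTR *import_local_ids,
  int **import_procs,
  int **import_to_parts,
  int *num_export,                  /* export-list outputs                    */
  ZOLTAN_ID_PTR *export_global_ids,
  ZOLTAN_ID_PTR *export_local_ids,
  int **export_procs,
  int **export_to_parts)
{
  /* This skeleton returns export lists only; a negative num_import tells
   * the Zoltan interface to ignore the import arrays entirely. */
  *num_import = -1;

  /* A real method would query the application here, compute the new
   * decomposition, and fill the export arrays using Zoltan_Special_Malloc
   * (see the Memory Management section).  Nothing moves in this skeleton. */
  *num_export = 0;

  return ZOLTAN_OK;
}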
+
[Table of Contents  |  Next:  +Data Structures  |  Previous:  +Load-Balancing Interface Routines  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add_memory.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add_memory.html new file mode 100644 index 00000000..2ce07f0b --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add_memory.html @@ -0,0 +1,276 @@ + + + + + + + + + Zoltan Developer's Guide: Adding Algorithms: How to handle memory + + + + + + + + +

+Memory Management in Zoltan Algorithms

+Zoltan uses a memory management +package to simplify debugging of memory problems. +It is strongly recommended that algorithm +developers use the routines in this package, such as +ZOLTAN_MALLOC , +ZOLTAN_CALLOC and +ZOLTAN_FREE, instead +of the standard C routines for most memory management. +

+Macros that simplify the allocation of global and local identifiers (IDs) +are defined in zz/zz_id_const.h. These macros are described in the +ID Data Types section. The macros include +error checking for the allocations and, thus, their use is highly recommended. + +
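The short sketch below illustrates the recommended allocation style. The argument patterns follow the RCB example later in this guide; "Dot_Struct", "allocate_dots" and the variable names are illustrative only.

/* Sketch: allocating ID arrays and a method-specific array, with error
 * checking, using the Zoltan memory package. */
static int allocate_dots(struct Zoltan_Struct *zz, int ndots)
{
  ZOLTAN_ID_PTR gids = ZOLTAN_MALLOC_GID_ARRAY(zz, ndots);   /* global IDs */
  ZOLTAN_ID_PTR lids = ZOLTAN_MALLOC_LID_ARRAY(zz, ndots);   /* local IDs  */
  struct Dot_Struct *dots =
      (struct Dot_Struct *) ZOLTAN_MALLOC(ndots * sizeof(struct Dot_Struct));

  if (!gids || (zz->Num_LID && !lids) || !dots) {
    ZOLTAN_FREE(&gids);       /* ZOLTAN_FREE takes the address of the  */
    ZOLTAN_FREE(&lids);       /* pointer and sets it to NULL           */
    ZOLTAN_FREE(&dots);
    return ZOLTAN_MEMERR;
  }

  /* ... fill and use the arrays ... */

  ZOLTAN_FREE(&dots);
  ZOLTAN_FREE(&lids);
  ZOLTAN_FREE(&gids);
  return ZOLTAN_OK;
}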

When a dynamic structure needs to be returned to the application, +special memory allocation routines are needed.  For example, the import +and export lists of data to migrate are returned to an application from +Zoltan_LB_Partition +and +Zoltan_Invert_Lists. +There are two special routines for managing memory for such situations, +called Zoltan_Special_Malloc and Zoltan_Special_Free. +Algorithms must use these functions to maintain compatibility with both +C and Fortran90 applications; these special routines manage memory in a +way that is compatible with both languages. +

Some load-balancing algorithms may contain persistent data structures, +that is, data structures that are preserved between calls to the load-balancing +routine. The Zoltan_Struct structure contains a field +LB.Data_Structure +for this purpose, allowing multiple Zoltan structures to preserve +their own decomposition data. The developer should write a function that +frees this data structure.  Use Zoltan_RCB_Free_Structure as an +example. +

+ + +


+ +
+int Zoltan_Special_Malloc(struct Zoltan_Struct *zz, void **array, int size, ZOLTAN_SPECIAL_MALLOC_TYPE type);
+

The Zoltan_Special_Malloc routine allocates memory to be returned to the application by Zoltan (e.g., the result arrays of Zoltan_LB_Partition and Zoltan_Invert_Lists). Returned memory must be allocated by Zoltan_Special_Malloc to ensure it is allocated in a manner compatible with the application's language. Memory allocated by Zoltan_Special_Malloc must be deallocated by Zoltan_Special_Free.
  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments:
    zz:  The Zoltan structure currently in use.
    array:  Upon return, a pointer to the allocated space. Usually of type int** or ZOLTAN_ID_PTR*.
    size:  The number of elements (not bytes) to be allocated.
    type:  The type of array to allocate. Must be one of ZOLTAN_SPECIAL_MALLOC_INT, ZOLTAN_SPECIAL_MALLOC_GID or ZOLTAN_SPECIAL_MALLOC_LID for processor numbers, global IDs and local IDs, respectively.
Returned Value:
    int:  1 if the allocation succeeded; 0 if it failed.
Example:
+ +ierr = Zoltan_Special_Malloc(zz, (void **)import_gid, +
                             num_import, +
                             ZOLTAN_SPECIAL_MALLOC_GID); +
+
Allocates an array with num_import global IDs +and returns a pointer to the allocated space in import_gid. +
+ +

+ + +


+ +
+int Zoltan_Special_Free(struct Zoltan_Struct +*zz, void **array, ZOLTAN_SPECIAL_MALLOC_TYPE type);  +
+

Zoltan_Special_Free frees memory allocated by Zoltan_Special_Malloc. +The array pointer is set to NULL upon return. +
  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments:
    zz:  The Zoltan structure currently in use.
    array:  The array to be freed. Upon return, the pointer is set to NULL.
    type:  The type of the array. Must be one of ZOLTAN_SPECIAL_MALLOC_INT, ZOLTAN_SPECIAL_MALLOC_GID or ZOLTAN_SPECIAL_MALLOC_LID for processor numbers, global IDs and local IDs, respectively.
Returned Value:
    int:  1 if the deallocation succeeded; 0 if it failed.
Example:
+ +ierr = Zoltan_Special_Free(zz, (void **)import_gid, +
                       ZOLTAN_SPECIAL_MALLOC_GID); +
+
Frees the global IDs +array import_gid and sets it to NULL.
+ +
  +

+ +


+
[Table of Contents  |  Next:  +Parameters  |  Previous:  +Data Structures  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add_params.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add_params.html new file mode 100644 index 00000000..af08d10a --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add_params.html @@ -0,0 +1,118 @@ + + + + + + + + + + Zoltan Developer's Guide: Adding Parameters + + + + + + +

+Adding new parameters

+All parameters in Zoltan should be set and accessed through the parameter +setting routines. To add a new parameter to an existing method, you +need to do the following: +
    +
  • +In the source code for the desired method, search for  the place where +the static array of parameters is defined. It will look something like: +static +PARAM_VARS Method_params[] = { ... }.  Add a line with the name +of the new parameter, a pointer to the variable you want to associate (usually +NULL), and its type.
  • + +
  • +In the method source code, bind the parameter to a local variable through +Zoltan_Bind_Param. +Make sure you do this before Zoltan_Assign_Param_Vals +is +invoked.
  • + +
  • +Update the parameter function for the method in question. This routine +is typically called Zoltan_Method_Set_Param. This +routine checks if a given string is a valid parameter for that method. +It may also verify the values.
  • +
+When you add a new method to Zoltan, you also need to: +
    +
  • +Write a parameter function for your method that checks whether a given +string and value is a valid parameter pair for your method. See Zoltan_RCB_Set_Param +in rcb/rcb.c for an example.
  • + +
  • +Let your method access the parameters via Zoltan_Bind_Param +and +Zoltan_Assign_Param_Vals.
  • + +
  • +Change the parameter function array in params/set_params.c to include +your parameter function. Simply add a new entry to the static array that +looks like: static ZOLTAN_SET_PARAM_FN * Param_func[] = {...}.
  • + +
  • +Make sure your method uses the key +parameters in Zoltan correctly.
  • +
+One useful convention is to put your method routine and your corresponding +parameter function in the same source file. This way you can define the +parameters in a static array. This convention eliminates  the risk +of bugs caused by duplicate declarations (that are, incorrectly, not identical). +
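The steps above can be summarized in a short sketch for a hypothetical method "WIDGET". The parameter names, the entry layout (name, bound variable, type string, dimension), and the helper calls are modeled on rcb/rcb.c and the parameter-setting services described in this guide; verify the exact PARAM_VARS definition and function signatures in the params directory before copying this pattern.

/* Parameter table (usually a static array in the method's source file). */
static PARAM_VARS Widget_params[] = {
  { "WIDGET_FLAG",  NULL, "INT",    0 },
  { "WIDGET_RATIO", NULL, "DOUBLE", 0 },
  {  NULL,          NULL,  NULL,    0 }          /* terminator */
};

/* Parameter function added to the Param_func array in params/set_params.c. */
int Zoltan_Widget_Set_Param(char *name, char *val)
{
  PARAM_UTYPE result;
  int index;
  return Zoltan_Check_Param(name, val, Widget_params, &result, &index);
}

/* Inside the WIDGET load-balancing function: bind local variables to the
 * parameters before the values are assigned. */
  int    widget_flag  = 0;       /* defaults */
  double widget_ratio = 1.0;
  Zoltan_Bind_Param(Widget_params, "WIDGET_FLAG",  (void *) &widget_flag);
  Zoltan_Bind_Param(Widget_params, "WIDGET_RATIO", (void *) &widget_ratio);
  /* ... then call Zoltan_Assign_Param_Vals with the Zoltan_Struct's
     parameter list, as done in rcb/rcb.c, before using the values. */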
+
+
[Table of Contents  |  Next:  +Part Remapping  |  Previous:  +Memory Management  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add_remap.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add_remap.html new file mode 100644 index 00000000..569bae7f --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add_remap.html @@ -0,0 +1,248 @@ + + + + + + + + + + Zoltan Developer's Guide: Part Remapping + + + + + + +

+Part Remapping

+Part remapping can be incorporated into load-balancing algorithms. +The part remapping algorithm works as follows: +
    +
+After partitioning within a ZOLTAN_LB_FN but before import or export lists are built, the partitioning algorithm calls Zoltan_LB_Remap.
  • +
+Zoltan_LB_Remap builds a bipartite graph based on local import or export information (depending on which is available in the partitioning algorithm). Vertices of the graph are processor or part numbers used in the old (input to the partitioner) and new (computed by the partitioner) decompositions. Edges connect old and new vertices; the edge weight for edge e_ij is the number of objects in old part i that are also in new part j. The bipartite graph is stored as a hypergraph, so that Zoltan's hypergraph matching routines may be applied.
  • +
  • +Zoltan_LB_Remap gathers the local +hypergraph edges onto each processor and performs a serial matching of +the vertices. This matching defines the remapping. +
  • +
  • +Zoltan_LB_Remap remaps the input +processor and part information to reflect the remapping and returns +the result to the application. It also builds array zz->LB.Remap +that is used in other functions (e.g., +Zoltan_LB_Box_PP_Assign and +Zoltan_LB_Point_PP_Assign). +
  • +
  • +Using the remapping information returned from +Zoltan_LB_Remap, the partitioning +algorithm builds the import or export lists to return to the application. +Note: if the partitioning algorithm builds import lists, data may have to be +moved to appropriate processors before building import lists to reflect +the remapping; see rcb/shared.c for an example. +
  • +
+ + +

+


+
int Zoltan_LB_Remap +(struct Zoltan_Struct *zz, +int *new_map, +int num_obj, +int *procs, +int *old_parts, +int *new_parts, +int export_list_flag); +
+
+Zoltan_LB_Remap remaps computed part (or processor) numbers in +an attempt to maximize the amount of data that does not have to be migrated +to the new decomposition. It is incorporated directly into partitioning +algorithms, and should be called after the new decomposition is computed +but before return lists (import or export lists) are created. +Zoltan_LB_Remap should be invoked when Zoltan parameter +REMAP is one. +Even when +REMAP is one, +remapping is not done under a number of conditions; these conditions are +listed with the description of +REMAP. + +
  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments:
    zz:  A pointer to the Zoltan_Struct used in the partitioning algorithm.
    new_map:  Upon return, a flag indicating whether remapping was actually done. Remapping is not done under a number of conditions (described with parameter REMAP) or when the computed remap gives a worse or equivalent result than the decomposition computed by the partitioning algorithm.
    num_obj:  Input: the number of objects the processor knows about after computing the decomposition. If the partitioning algorithm computes export lists, num_obj is the number of objects stored on the processor; if it computes import lists, num_obj is the number of objects that will be stored on the processor in the new decomposition.
    procs:  Upon input: an array of size num_obj containing processor assignments for the objects; if export_list_flag == 1, procs contains processor assignments in the NEW decomposition (computed by the partitioner); otherwise, procs contains processor assignments in the OLD decomposition (input by the application). Upon return, procs contains remapped processor assignments for the NEW decomposition, regardless of the value of export_list_flag.
    old_parts:  Upon input: an array of size num_obj containing part assignments for the objects in the OLD decomposition (input by the application).
    new_parts:  Upon input: an array of size num_obj containing part assignments for the objects in the NEW decomposition (computed by the partitioning algorithm). Upon return: new_parts contains remapped part assignments in the NEW decomposition.
    export_list_flag:  Flag indicating whether the partitioning algorithm computes export lists or import lists. The procedure for building the bipartite graph depends on whether the partitioning algorithm knows export or import information.
Returned Value:
    int:  Error code.
+ +
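A short usage sketch is given below for a partitioner that computed export lists. The call matches the prototype above; num_obj, procs, old_parts and new_parts are the per-object arrays just described, and error handling is abbreviated.

/* Sketch: invoking part remapping from within a ZOLTAN_LB_FN. */
int new_map = 0;
int ierr = Zoltan_LB_Remap(zz, &new_map, num_obj, procs, old_parts,
                           new_parts, 1 /* export_list_flag */);
if (ierr != ZOLTAN_OK && ierr != ZOLTAN_WARN) {
  /* handle the error */
}
if (new_map) {
  /* procs[] and new_parts[] now hold the remapped NEW assignments;
     build the export (or import) lists from these remapped values. */
}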
+
[Table of Contents  |  Next:  +Migration Tools  |  Previous:  +Adding new parameters  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add_struct.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add_struct.html new file mode 100644 index 00000000..7220a3b4 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_add_struct.html @@ -0,0 +1,228 @@ + + + + + + + + + Zoltan Developer's Guide: Adding Data Structures + + + + + + +

+Data Structures

+The main data structures for the algorithm should be pointed to by the +LB.Data_Structure +field of the Zoltan_Struct. +This requirement allows reuse of data structures from one invocation of +the new load-balancing algorithm to the next. It also prevents the use +of global data structures for the algorithm so that multiple instances +of the algorithm may be used (i.e., the same algorithm can be used for +multiple Zoltan_Struct structures).  +An example showing the construction of data structures for the +Recursive +Coordinate Bisection (RCB) algorithm is included +in the figure below. +
+ + + + + +
+ +
/* Allocate RCB data structure for this Zoltan structure. +
 * If the previous data structure still exists, free the Dots +first; +
 * the other fields can be reused. +
 */ +
if (zz->LB.Data_Structure == NULL) { +
   rcb = (RCB_STRUCT *) ZOLTAN_MALLOC(sizeof(RCB_STRUCT)); +
   zz->LB.Data_Structure = (void *) rcb; +
   rcb->Tree_Ptr = (struct rcb_tree *)  +
                    +ZOLTAN_MALLOC(zz->Num_Proc*sizeof(struct +rcb_tree)); +
   rcb->Box = (struct rcb_box *) ZOLTAN_MALLOC(sizeof(struct +rcb_box)); +
} +
else { +
   rcb = (RCB_STRUCT *) zz->LB.Data_Structure; +
   ZOLTAN_FREE(&(rcb->Dots)); +
} +
+
Example demonstrating allocation of data structures +for the RCB algorithm.  (Taken from rcb/rcb_util.c.)
+ +

The data needed for the algorithm is collected through calls to the query functions registered by the application. Algorithms should test the query function pointers for NULL and report errors when needed functions are not registered. The appropriate query functions can be called to build the algorithm's data structures up front, or they can be called during the algorithm's execution to gather data only as it is needed. The figure below shows how the Dots data structure needed by RCB is built. The call to zz->Get_Num_Obj invokes a ZOLTAN_NUM_OBJ_FN query function to determine the number of objects on the processor. Space for the Dots data structure is allocated through calls to ZOLTAN_MALLOC, ZOLTAN_MALLOC_GID_ARRAY, and ZOLTAN_MALLOC_LID_ARRAY. The Dots information is obtained through a call to the Zoltan service function Zoltan_Get_Obj_List; this function calls either a ZOLTAN_OBJ_LIST_FN or a ZOLTAN_FIRST_OBJ_FN/ZOLTAN_NEXT_OBJ_FN pair to get the object IDs and weights. The data for each Dot is set in the function initialize_dot, which includes calls to zz->Get_Geom, a ZOLTAN_GEOM_FN query function.
  +
  +

+ + + + + +
   + +
/* +
 * Allocate space for objects.  Allow extra space +
 * for objects that are imported to the processor. +
 */ +

*num_obj = zz->Get_Num_Obj(zz->Get_Num_Obj_Data, &ierr); +
if (ierr) { +
  ZOLTAN_PRINT_ERROR(zz->Proc, +yo, +
                 +"Error returned from Get_Num_Obj."); +
  return(ierr); +
} +

*max_obj = (int)(1.5 * *num_obj) + 1; +
*global_ids = ZOLTAN_MALLOC_GID_ARRAY(zz, (*max_obj)); +
*local_ids  = ZOLTAN_MALLOC_LID_ARRAY(zz, (*max_obj)); +
*dots = (struct Dot_Struct *) +
         ZOLTAN_MALLOC((*max_obj)*sizeof(struct +Dot_Struct)); +

if (!(*global_ids) || (zz->Num_LID && !(*local_ids)) || +!(*dots)) { +
  ZOLTAN_PRINT_ERROR(zz->Proc, +yo, "Insufficient memory."); +
  return(ZOLTAN_MEMERR); +
} +

if (*num_obj > 0) { +

  if (wgtflag) { +

    /* +
     *  Allocate space for object weights. +
     */ +

    objs_wgt = (float *) ZOLTAN_MALLOC((*num_obj)*sizeof(float)); +
    if (!objs_wgt) { +
      ZOLTAN_PRINT_ERROR(zz->Proc, +yo, "Insufficient memory."); +
      return(ZOLTAN_MEMERR); +
    } +
    for (i = 0; i < *num_obj; i++) objs_wgt[i] = 0.; +
  } +

  /* +
   *  Get list of objects' IDs and weights. +
   */ +

  Zoltan_Get_Obj_List(zz, *global_ids, *local_ids, wgtflag, +
                  objs_wgt, &ierr); +
  if (ierr) { +
    ZOLTAN_PRINT_ERROR(zz->Proc, +yo, +
                   +"Error returned from Zoltan_Get_Obj_List."); +
    ZOLTAN_FREE(&objs_wgt); +
    return(ierr); +
  } +

  ierr = initialize_dot(zz, *global_ids, *local_ids, *dots, +
                        +*num_obj, wgtflag, objs_wgt); +
  if (ierr == ZOLTAN_FATAL || ierr == ZOLTAN_MEMERR) { +
    ZOLTAN_PRINT_ERROR(zz->Proc, +yo, +
                   +"Error returned from initialize_dot."); +
    ZOLTAN_FREE(&objs_wgt); +
    return(ierr); +
  } +

  ZOLTAN_FREE(&objs_wgt); +
} +
+

Example demonstrating how data structures are +built for the RCB algorithm.  (Taken from rcb/shared.c.)
+ +

+The data structures pointed to by zz->LB.Data_Structure must eventually be freed, and in some cases they must also be copied.

A function that frees these structures and resets zz->LB.Data_Structure +to NULL should be written. The function should be called when the load-balancing +algorithm exits, either normally or due to an error condition. +The function +Zoltan_RCB_Free_Structure in rcb/rcb_util.c may be used as an example. + +

If your algorithm uses the KEEP_CUTS +parameter, a function that copies one zz->LB.Data_Structure to another is +required. This is particularly important for C++, +which may create temporary objects +at runtime by invoking a copy operator (which will call your copy function). +It is a convenience for C applications, which may wish to copy one Zoltan_Struct +to another. +See the function Zoltan_RCB_Copy_Structure in rcb/rcb_util.c +for an example. + +
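A minimal sketch of such a free routine is shown below for a hypothetical method "WIDGET" whose persistent data hangs off zz->LB.Data_Structure. WIDGET_STRUCT and its member names are placeholders; the pattern follows the role of Zoltan_RCB_Free_Structure described above.

/* Sketch: free the persistent data and reset the pointer to NULL. */
void Zoltan_Widget_Free_Structure(struct Zoltan_Struct *zz)
{
  WIDGET_STRUCT *w = (WIDGET_STRUCT *) zz->LB.Data_Structure;

  if (w != NULL) {
    ZOLTAN_FREE(&(w->Dots));                   /* free each allocated member   */
    ZOLTAN_FREE(&(w->Tree_Ptr));               /* of the persistent structure  */
    ZOLTAN_FREE(&(zz->LB.Data_Structure));     /* then the structure itself    */
  }
}

A corresponding copy routine (compare Zoltan_RCB_Copy_Structure in rcb/rcb_util.c) would allocate a new WIDGET_STRUCT, deep-copy each member, and store the result in the destination structure's LB.Data_Structure field.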


+
[Table of Contents  |  Next:  +Memory Management  |  Previous:  +Load-Balancing Function Implementation  |  Privacy and Security] +
  + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_cpp.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_cpp.html new file mode 100644 index 00000000..19efcb9a --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_cpp.html @@ -0,0 +1,227 @@ + + + + + + + + + + Zoltan Developer's Guide: C++ Interface + + + + + + +

+C++ Interface

+As with the Fortran interface just described, any change to the user +API of Zoltan should be reflected in the C++ interface. This section +explains the conventions used in the C++ interface, which you will want +to follow when you modify or expand it. + + + +

Classes

+The C language Zoltan library already observes the principles of +object oriented program design. Each sub function of Zoltan (load +balancing, timing, etc.) has a data structure +associated with it. This data structure maintains all the state required +for one instance of that sub function. Each request of the library for +some operation requires that data structure. + +

The classes in the Zoltan C++ library follow the structure just described. Each class is defined in a header file and encapsulates a Zoltan data structure and the functions that operate on that structure. A C++ application wishing to use a feature of Zoltan would include the feature's header file in its source and link with the Zoltan C library.

The C language load-balancing data structure (Zoltan_Struct) and the C functions that operate on it are accessed through the C++ Zoltan class, defined in zoltan_cpp.h.

The communication package is encapsulated in the Zoltan_Comm class defined in zoltan_comm_cpp.h. Again, to use the communication utility of Zoltan from a C++ program, include zoltan_comm_cpp.h and use the C++ methods defined there.

The C++ Zoltan timer class is called Zoltan_Timer_Object and +is defined in zoltan_timer_cpp.h. + +

The distributed directory utility of Zoltan is encapsulated in the +class Zoltan_DD defined in zoltan_dd_cpp.h + +

Programming Conventions

+When modifying the interface to Zoltan , you will want to modify +the appropriate C++ header file accordingly. This section describes the +conventions to follow to maintain a consistent and correct library interface +for the C++ user of Zoltan. + +

Namespaces

+In order to maintain portability across platforms, there is no Zoltan +namespace. Many C++ compilers do not support namespaces at this time. +The name of each Zoltan class begins with Zoltan_, and hopefully +this will never clash with another namespace. + +

Class names

+Class names are Zoltan_ followed by text indicating the sub +function of Zoltan that is encapsulated by the class. + +

Method names

+Method names are derived from the C library function names in such a +way that the name will be obvious to a person familiar with the C library. +We remove the beginning of the C library name, the part that identifies +the subset of the Zoltan library that the function is part of, and keep +the last part of the C library name, the part that describes what the +function does. For example the C function Zoltan_LB_Partition becomes +the C++ method LB_Partition in the class Zoltan and +C function Zoltan_Comm_Create becomes the C++ method +Create in the class Zoltan_Comm. + +

Const methods

+All class methods which can be declared const, because they +do not modify the object, should be declared const. This allows +C++ programmers to call these methods on their const objects. + +

Declaration of method parameters

+ +Parameters that are not changed in the method should be declared const. +This can get complicated, but it helps to read declarations from right to +left. const int * & p says p is a reference to a pointer to +a const int and means the method will not change the value pointed +to by p. On the other hand int * const & p says that +p is a reference to a const pointer to int so the method will +not change the pointer. + +

Variables that are passed by value in a C function will be passed by +const reference in the C++ method. This is semantically the same, but +it is more efficient, and it will work with temporary variables created +by a compiler. + +

If a C function takes a pointer to a single built-in type (not an aggregate +type), the associated C++ method will take a reference variable. +If a C function takes a pointer to a pointer, the C++ function will take +a pointer reference. The references are more efficient, and it is +the behavior a C++ programmer expects. +A pointer to an array remains a pointer to an array. + +

+ + + + + + + + + + + + + +
 C function parameter   C++ method parameter     method's const behavior
 int val                const int &val           won't change value
 int *singlep           int &singlep             may change value
                        const int &singlep       won't change value
 int **singlep          int *&singlep            may change pointer or value
                        const int * &p           won't change value
                        int *const &p            won't change pointer to value
                        const int * const &p     won't change anything
 int *arrayp            int *arrayp              may change array contents
                        const int * arrayp       won't change array contents
+ +

If a C function takes a pointer to an array of char, the +associated C++ method will take a string object. + +

+ + +
 C function parameter   C++ method parameter
 char *fname            std::string &fname
+ +

In all honesty, it is tedious to carefully apply const'ness in +parameter declarations, and we did not do it consistently throughout the +C++ wrapping of Zoltan. Please feel free to add const declarations +where they belong, and try to use them correctly if you add or modify Zoltan +C++ methods. + +

Copy constructor, copy operator

+Each class should have a copy constructor and a copy operator. + + +

Keeping the C++ interface up-to-date

+Here we provide a checklist of things to be done when the C interface to +the Zoltan library is changed: + +
    +
  • If a new major component is added to Zoltan, create a C++ class for +that component in a new header file, using the programming conventions +described above.
  • +
  • If functions are added or removed, or their parameter lists are +changed, then update the header file defining the class that contains +those functions.
  • +
  • When Zoltan data structures are changed, be sure to change the C functions +that copy the data structure. (They contain Copy in their name.) +Correct copying is more important in C++, +where the compiler may generate new temporary objects, than it is in C.
  • +
  • If you change the C++ API, be sure to change: +
      +
    • zCPPdrive, the test program for the Zoltan C++ library
    • +
    • the C++ examples in the Examples directory
    • +
    • the method prototypes in the Zoltan User's Guide.
    • +
    +
+ +
+
[Table of Contents  |  Next:  +References  |  Previous:  +FORTRAN Interface  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_degenerate.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_degenerate.html new file mode 100644 index 00000000..bc55e39b --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_degenerate.html @@ -0,0 +1,172 @@ + + + + + + + + + Zoltan Developer's Guide: Degenerate Geometries + + + + + + +

+Appendix: Handling Degenerate Geometries

+ +The geometry processed by one of the geometric methods +RCB, RIB, or +HSFC +may be degenerate. By this we mean +it may have 3-dimensional coordinates but be essentially flat, or +it may have 3 or 2-dimensional coordinates but be essentially a line +in space. If we treat the geometry as a lower dimensional object +for the purpose of partitioning, the result may be a more natural +partition (one the user would have expected) and a faster +run time. +

+The caller may set the REDUCE_DIMENSIONS parameter to TRUE in any of the three geometric methods if they want Zoltan to check for a degenerate condition and do lower dimensional partitioning if such a condition is found. They may set the DEGENERATE_RATIO to specify how flat or thin a geometry must be to be considered degenerate.

+ +

+Outline of Algorithm

+ +All three geometric methods call +Zoltan_Get_Coordinates +to obtain the +problem coordinates. If REDUCE_DIMENSIONS is TRUE, we check +in this function to see if the geometry is degenerate. If it is, +we transform the coordinates to the lower dimensional space, flag +that the problem is now lower dimensional, and return the transformed +coordinates. The +RCB, RIB, or +HSFC +calculation is performed on the new coordinates in the lower dimensional +space. +

+If KEEP_CUTS is TRUE, the transformation is saved so that in +Zoltan_LB_Box_Assign or Zoltan_LB_Point_Assign +the coordinates can be transformed before the assignment is calculated. +If RCB_REUSE is TRUE in the RCB method, +the transformation is also saved. On re-partitioning, we can do some +simple tests to see if the degeneracy condition has changed before +completely re-calculating the coordinate transformation. +

+To determine if the geometry is degenerate, we calculate the same inertial matrix that is calculated for RIB, except that we ignore vertex weights. The 3 orthogonal eigenvectors of the inertial matrix describe the three primary directions of the geometry. The bounding box oriented in these directions is tested for degeneracy. In particular (for a 3 dimensional geometry), if the length of the longest side divided by the length of the shortest side exceeds the DEGENERATE_RATIO, we consider the geometry to be flat. If, in addition, the length of the longest side divided by the length of the middle side exceeds the DEGENERATE_RATIO, we consider the geometry to be essentially a line.

+If a 3 dimensional geometry is determined to be flat, we transform coordinates to a coordinate system where the XY plane corresponds to the oriented bounding box, and project all coordinates to that plane. These X,Y coordinates are returned to the partitioning algorithm, which performs two dimensional partitioning. Similarly, if the geometry is very thin, we transform coordinates to a coordinate system with the X axis going through the bounding box in its principal direction, and project all points to that axis. Then one dimensional partitioning is performed.
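The ratio test just described can be summarized in a small illustrative helper, shown below. It is not part of Zoltan; it only encodes the DEGENERATE_RATIO comparisons above, taking the three oriented bounding-box edge lengths and returning the implied target dimension (3 = not degenerate, 2 = flat, 1 = essentially a line).

/* Illustrative helper: decide the target dimension from the oriented
 * bounding-box edge lengths and the DEGENERATE_RATIO. */
static int target_dimension(double len[3], double degenerate_ratio)
{
  double a = len[0], b = len[1], c = len[2], t;
  int dim = 3;

  /* sort so that a >= b >= c */
  if (a < b) { t = a; a = b; b = t; }
  if (b < c) { t = b; b = c; c = t; }
  if (a < b) { t = a; a = b; b = t; }

  if (a > degenerate_ratio * c) {      /* longest/shortest exceeds ratio: flat */
    dim = 2;
    if (a > degenerate_ratio * b)      /* longest/middle also exceeds: a line  */
      dim = 1;
  }
  return dim;
}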

+There is a small problem in calculating +Zoltan_LB_Box_Assign +when the partitioning was performed +on transformed geometry. The caller provides the box vertices in +problem coordinates, but the partition was calculated in +transformed coordinates. When the vertices are transformed, they +are in general no longer the vertices of an axis-aligned box in +the new coordinate system. The +Box_Assign calculation requires an axis-aligned box, and +so we use the bounding box of the transformed vertices. The resulting +list of processes or parts intersecting the box may therefore +contain some processes or parts which actually do not intersect +the box in problem coordinates, however it will not omit any. + + +

Data Structure Definitions

+The transformation is stored in a Zoltan_Transform_Struct +structure which is +defined in zz/zz_const.h. It is saved as part of the algorithm +specific information stored in the +LB.Data_Structure field of the +Zoltan_Struct. +The flag that indicates whether the geometry was found to be +degenerate is the Target_Dim field of this structure. +

+To use the degenerate geometry detection capability from a new +geometric method, you would add a Zoltan_Transform_Struct +structure to the algorithm specific data structure, add code to +Zoltan_Get_Coordinates to look for it, and check the +Target_Dim field on return to see if the problem dimension +was reduced. You would also need to include the +coordinate transformation in your Box_Assign and Point_Assign +functionality. + + +
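A sketch of that arrangement is given below for a hypothetical method "WIDGET". Only Zoltan_Transform_Struct and its Target_Dim field come from the description above; WIDGET_STRUCT, the member name Tran, and the exact interpretation of Target_Dim are assumptions to be verified against zz/zz_const.h and the existing geometric methods.

/* Sketch: embedding the coordinate transformation in a method's
 * persistent data structure. */
typedef struct Widget_Struct {
  /* ... method-specific fields ... */
  struct Zoltan_Transform_Struct Tran;   /* saved degenerate-geometry transform */
} WIDGET_STRUCT;

/* After Zoltan_Get_Coordinates returns, check whether the problem
 * dimension was reduced (assumed convention; verify in zz/zz_const.h): */
if (w->Tran.Target_Dim > 0) {
  /* geometry was degenerate: partition in the lower-dimensional space,
     and apply the same transform in Box_Assign/Point_Assign support. */
}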
  +
  + +

+


+
[Table of Contents  |  Previous:  Hilbert Space Filling Curve (HSFC)  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_dist.html new file mode 100644 index 00000000..e077988f --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_dist.html @@ -0,0 +1,74 @@ + + + + + + + + + + Zoltan Developer's Guide: Distribution + + + + + + +

+Zoltan Distribution

+The organization of the Zoltan software distribution is described in the +following sections.  Full pathnames are specific to Sandia's CSRI SON +LAN. +
CVS (source code control) +
Layout of Directories +
Compilation and Makefiles
+ +
+
[Table of Contents  |  Next:  +CVS  |  Previous:  Zoltan +Quality Assurance  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_dist_compile.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_dist_compile.html new file mode 100644 index 00000000..e9fc2bc7 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_dist_compile.html @@ -0,0 +1,124 @@ + + + + + + + + + Zoltan Developer's Guide: Compilation + + + + + + + +

+Compilation and Makefiles

+The Zoltan distribution includes a main (top-level) makefile with targets for the Zoltan library, the test driver programs, and some graphical tools. When the library is compiled for a specific target platform, the Autotools and CMake build systems provide the platform-specific configuration information.

+Both the Autotools and CMake build systems must be maintained in Zoltan. +Thus, new source code files and directories must be added for both systems. +It is easiest to follow the examples of existing files in the build +environments; good examples +are hsfc.h, hsfc.c, dr_loadbal_const.h and +dr_loadbal.c. +

+To add new source files or source directories: +

    +
  • +Autotools: Add new library files or directories to +zoltan/src/Makefile.am; add +new driver files to zoltan/src/driver/Makefile.am. +In the main +zoltan directory, run the bootstrap-local script to rebuild the +automated Makefiles. +
  • +
  • +CMake: Add new library files to zoltan/src/CMakeLists.txt; add +new driver files to zoltan/src/driver/CMakeLists.txt. +
  • +
+

+Alternatively, new algorithms can be added as separate libraries with which +Zoltan may link. +The implementation of the ParMETIS +interface in Zoltan can serve as an example. +

+To add new third-party libraries: +

    +
+Autotools: Add checks for new library paths and include files to zoltan/configure.ac. Search for the string "parmetis" in zoltan/configure.ac, and follow its example for the new library. Also edit zoltan/src/include/Zoltan_config.h.in to include a macro for the new third-party library; follow the example of "HAVE_PARMETIS". In the main zoltan directory, run the bootstrap-local script to rebuild the automated configure and Makefiles.
  • +
+CMake: Follow the instructions in Trilinos/cmake/HOWTO.ADD_TPL. In zoltan/src/CMakeLists.txt and zoltan/cmake/Zoltan_config.h.in, follow the example of "HAVE_PARMETIS".
  • +
+ +
+
[Table of Contents  |  Next:  +Zoltan Interface and Data Structures  |  Previous:  +Layout of Directories  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_dist_cvs.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_dist_cvs.html new file mode 100644 index 00000000..c9d61495 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_dist_cvs.html @@ -0,0 +1,95 @@ + + + + + + + + + + Zoltan Developer's Guide: CVS + + + + + + + +

+CVS

+The source code and documentation for the Zoltan library is maintained +under the Concurrent Versions System (CVS) software. CVS allows multiple +developers to edit their own copies of the software and merges updated +versions with the developers' own versions. Zoltan's repository +is part of the Trilinos repository. + +Developers must set the CVSROOT environment variable to the repository +directory: +
setenv CVSROOT username@software.sandia.gov:/space/CVS +
+where username is the developer's username on the CVS server +software.sandia.gov. +Since Zoltan is part of Trilinos, it is best to check out Trilinos from +the repository: +
cvs checkout -P Trilinos
+Zoltan is then in directory Trilinos/packages/zoltan. + +

+Alternatively, one can get a working copy of only the Zoltan software: +

cvs checkout -P zoltan
+However, changes to the +Autotools build +environment require directories above the Zoltan directory in +the Trilinos repository, so it is best to check out the entire Trilinos +repository. + +

+


+
[Table of Contents  |  Next:  +Layout of Directories  |  Previous:  +Zoltan Distribution  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_dist_dir.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_dist_dir.html new file mode 100644 index 00000000..bb0ddff1 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_dist_dir.html @@ -0,0 +1,280 @@ + + + + + + + + + Zoltan Developer's Guide: Directory Layout + + + + + + +

+Layout of Directories

+The source code is organized into several subdirectories within the Zoltan +main directory. General interface routines are stored +in a single directory. Communication and memory allocation utilities available +to all algorithms are in separate directories. Each load-balancing method +(or family of methods) should be stored in its own directory. +In addition, a courtesy copy of the +ParMETIS +graph-partitioning package is included in the top-level directory ParMETIS. +

+In the following +table, +the source-code directories currently in the Zoltan directory are listed +and described. +
  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
 Directory                    Description
 zz                           General Interface definitions, Zoltan data structure definitions, interface functions and functions related to the interface. See Interface Functions, ID Data Types, and Data Structures.
 lb                           Load-Balancing interface routines and load-balancing data structure definitions.
 all                          Special memory allocation functions for memory returned by Zoltan to an application.
 par                          Parallel computing routines.
 param                        Routines for changing parameter values at runtime.
 parmetis                     Routines to access the ParMETIS and Jostle partitioning libraries.
 rcb                          Recursive Coordinate Bisection (RCB) and Recursive Inertial Bisection (RIB) algorithms.
 hsfc                         Hilbert Space-Filling Curve partitioning algorithm.
 bsfc                         Binned Space-Filling Curve partitioning algorithm.
 oct                          Rensselaer Polytechnic Institute's octree partitioning algorithms.
 reftree                      William Mitchell's Refinement Tree Partitioning algorithm and refinement tree data structure.
 timer                        Timing routines.
 ch                           Routines to read Chaco input files and build graphs for the driver program zdrive.
 ha                           Routines to support heterogeneous architectures.
 fort                         Fortran (F90) interface for Zoltan.
 Utilities/shared             Simple functions and utilities shared by Zoltan and other Zoltan Utilities.
 Utilities/Memory             Memory management utilities.
 Utilities/Communication      Unstructured communication utilities.
 Utilities/DDirectory         Distributed Data Directory utilities.
 Utilities/Config             Platform-specific makefile definitions for compiler, library and include-file paths.
 driver                       Test driver programs, zdrive and zCPPdrive.
 fdriver                      Fortran90 version of the test driver program.
 examples                     Simple examples written in C and C++ that use Zoltan.
 docs/Zoltan_html             Zoltan documentation and home page.
 docs/Zoltan_html/ug_html     User's guide in HTML format.
 docs/Zoltan_html/dev_html    Developer's guide in HTML format.
 docs/Zoltan_pdf              PDF versions of the Zoltan User's Guide and Developer's Guide.
 docs/internal                SQA documents for the Zoltan project.
The directory structure of the Zoltan distribution.
+ +


+
[Table of Contents  |  Next:  +Compilation  |  Previous:  +CVS  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_driver.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_driver.html new file mode 100644 index 00000000..73162aa1 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_driver.html @@ -0,0 +1,170 @@ + + + + + + + + + Zoltan Developer's Guide: Using Test Drivers: zdrive and zfdrive + + + + + + + +

+Appendix: Using the Test Drivers: zdrive, zCPPdrive and zfdrive

+ +

+Introduction

+In order to facilitate development and testing of the Zoltan library, simple driver programs, zdrive (C), zCPPdrive (C++) and zfdrive (Fortran90), are included with the library distribution. The concept behind the drivers is to read in mesh or graph information from files, run Zoltan, and then output the new assignments for each processor to another file. The test drivers zdrive and zCPPdrive read ExodusII/NemesisI parallel FEM files, Chaco input files, or general Matrix-Market files. Parallel NemesisI files can be created from ExodusII or Genesis files using the NemesisI utilities nem_slice and nem_spread. The Fortran90 program zfdrive reads only Chaco input files and general Matrix-Market files.

+Source code for zdrive is in +the driver and ch directories of +the Zoltan distribution. +The source code for zCPPdrive is also in driver, and uses some +of the same C source files (in driver and ch) that zdrive uses. +Source code for zfdrive is in the fdriver directory. +

+The C and C++ test drivers are built automatically in the +Autotools build system. +The F90 driver is built when the configure option "--enable-f90interface" is +provided. The drivers are placed in BUILD_DIR/src/driver and +BUILD_DIR/src/fdriver, for build directory BUILD_DIR. +

+The C and C++ test drivers are built in the +CMake build system when +option "-D Zoltan_ENABLE_Tests:BOOL=ON" is provided. The F90 driver is +built when option "-D Zoltan_ENABLE_F90INTERFACE:BOOL=ON" is also provided. +The drivers are placed in BUILD_DIR/packages/zoltan/src/driver and +BUILD_DIR/packages/zoltan/src/fdriver; they are also copied to +BUILD_DIR/packages/zoltan/test. +

+Running the Test Drivers

+The test drivers are run using an input command file. A fully commented example of +this file and the possible options can be found in zdrive.inp. +The default name for the command file is zdrive.inp, and the drivers will +look for this file in the execution directory if an alternate name is not +given on the command line. If another filename is being used for the command +file, it should be specified as the first argument on the command line. +(Note: zfdrive cannot read a command line argument; its input file +must be named zdrive.inp.) +

+For an example of a simple input file, see the figure +below. In this problem, the method being used for dynamic load balancing +is RCB.  Input data is read from Chaco +input files simple.graph and simple.coords.  Zoltan's +DEBUG_LEVEL parameter +is set to 3; default values of all other parameters are used. +(Note: zfdrive can read only a simplified version of the input file. +See the zfdrive notes in zdrive.inp for more details.) +
  +

+ + + + + +
+ +Decomposition method  = rcb +
Zoltan Parameters     = Debug_Level=3 +
File Type             = Chaco +
File Name             = simple +
Parallel Disk Info    = number=0 +
+
Example zdrive.inp file
+ +

The zdrive program creates ASCII files named "file_name.out.p.n", where file_name is the file name specified in zdrive.inp, p is the number of processors on which zdrive was run, and n=0,1,...,p-1 is the processor by which the file was created. (For zfdrive, the files are named "file_name.fout.p.n".) These files are in the same directory where the input graph file was located for that processor. Each file contains a list of global IDs for the elements that are assigned to that processor after running Zoltan. The input decomposition can also be written in this format to files "file_name.in.p.n"; see "zdrive debug level" in zdrive.inp for more details.

+Decompositions for 2D problems can be written to files that can be plotted +by gnuplot. See "gnuplot output" in zdrive.inp +for more information. Decompositions for 3D problems can be viewed after the +test driver has finished by running the graphical tools vtk_view or +vtk_write described next. +

+Adding New Algorithms

+The driver has been set up in such a way that testing new algorithms that +have been added to Zoltan is relatively simple. The method that is in the +input file is passed directly to Zoltan. Thus, this string must be the +same string that the parameter LB_METHOD is expecting. +

+
+
[Table of Contents  |  Next:  +Visualizing Geometric Partitions  |  Previous:  +References  |  Privacy and Security] + + + + + + + + + + + Zoltan Developer's Guide: FORTRAN Interface + + + + + + +

+FORTRAN Interface

+With any change to the user API of Zoltan, the Fortran interface should +be modified to reflect the change. This section contains information about +the implementation of the Fortran interface which should cover most situations. + +If you have questions or need assistance, contact william.mitchell@nist.gov. + +

If changes are made to functions that are called by zdrive, +then the changes should also be made to +zfdrive. Changes to the +Fortran interface can be tested by building and running zfdrive, +if the changes are in functions called by zfdrive. +The zfdrive program +works the same way as zdrive +except that it is restricted to the +Chaco +examples and simpler input files. + +

Any changes in the interface should also be reflected in the Fortran +API definitions in the Zoltan User's Guide. +

+Structures

+All structures in the API have a corresponding user-defined type in the +Fortran interface. If a new structure is added, then modifications will +be required to fort/fwrap.fpp and fort/cwrap.c. In these +files, search for Zoltan_Struct +and "do like it does." + +

An explanation of how structures are handled may help. The Fortran user-defined +type for the structure simply contains the address of the structure, i.e., +the C pointer returned by a call to create the structure. Note that the +user does not have access to the components of the structure, and can only +pass the structure to functions. Within the Fortran structure, the +address is stored in a variable of type(Zoltan_PTR), which is a character +string containing one character for each byte of the address. This gives +the best guarantee of portability under the Fortran and C standards. Also, +to insure portability of passing character strings, the character string +is converted to an array of integers before passing it between Fortran +and C. The process of doing this is most easily seen by looking at Zoltan_Destroy, +which has little else to clutter the code. +

+Modifications to an existing Zoltan +interface function

+If the argument list or return type of a user-callable function in Zoltan +changes, the same changes must be made in the Fortran interface routines. +This involves changes in two files: fort/fwrap.fpp and fort/cwrap.c. +In these files, search for the function name with the prefix Zoltan_ omitted, +and modify the argument list, argument declarations, return type, and call +to the C library function as appropriate. When adding a new argument, if +there is not already an argument of the same type,  look at another +function that does have an argument of that type for guidance. +

+Removing a Zoltan interface function

+If a user callable function is removed from the Zoltan library, edit fort/fwrap.fpp +and fort/cwrap.c to remove all references to that function. +

+Adding a new Zoltan interface function

+Adding a new function involves changes to the two files fort/fwrap.fpp +and fort/cwrap.c. Perhaps the easiest way to add a new function +to these files is to pick some existing function, search for all occurrences +of it, and use that code as a guide for the implementation of the interface +for the new function. Zoltan_LB_Point_Assign +is a nice minimal function to use as an example. Use a case insensitive +search on the name of the function without the Zoltan_LB_ prefix, for example +point_assign. + +

Here are the items in fwrap.fpp: +

    +
  • +public statement: The name of the function should be included in the list +of public entities.
  • + +
  • +interface for the C wrapper: Copy one of these and modify the function +name, argument list and declarations for the new function. The name is +of the form Zfw_LB_Point_Assign (fw stands for Fortran wrapper).
  • + +
  • +generic interface: This assigns the function name to be a generic name +for one or more module procedures.
  • + +
  • +module procedure(s): These are the Fortran-side wrapper functions. Usually +there is one module procedure of the form Zf90_LB_Point_Assign. If +one argument can have more than one type passed to it (for example, it +is type void in the C interface), then there must be one module procedure +for each type that can be passed. These are distinguished by appending +a digit to the end of the module procedure name. If n arguments can have +more than one type, then n digits are appended. See Zoltan_LB_Free_Part +for example. Generally the module procedure just calls the C-side wrapper +function, but in some cases it may need to coerce data to a different +type (e.g., Zoltan_Struct), +or may actually do real work (e.g., Zoltan_LB_Free_Part).
  • + +
     
+Here are the items in cwrap.c: +
    +
  • +name mangling: These are macros to convert the function name from the case +sensitive C name (for example, Zfw_LB_Point_Assign) to the mangled +name produced by the Fortran compiler. There are four of these for each +function:
  • + +
      +
    • +lowercase (zfw_lb_point_assign),
    • + +
    • +uppercase (ZFW_LB_POINT_ASSIGN),
    • + +
    • +lowercase with underscore (zfw_lb_point_assign_), and
    • + +
+lower case with double underscore (zfw_lb_point_assign__; the second underscore is appended only if the name already contains an underscore, which will always be the case for names starting with Zfw_).
    • +
    + +
  • +C-side wrapper function: Usually this just calls the Zoltan library function +after coercing the form of the data (for example, constructing the pointer +to Zoltan_Struct and call-by-reference +to call-by-value conversions).
  • +
+ +

+Query functions

+If a query function is added, deleted or changed, modifications must be +made to fort/fwrap.fpp and fort/cwrap.c, similar to the modifications +for interface functions, and possibly also include/zoltan.h and zz/zz_const.h. + +

Here are the places query functions appear in fwrap.fpp: +

    +
  • +public statement for the ZOLTAN_FN_TYPE +argument: These are identical to the C enumerated type.
  • + +
  • +definition of the ZOLTAN_FN_TYPE +arguments: There are two groups of these, one containing subroutines (void +functions) and one containing functions (int functions). Put the new symbol +in the right category. The value assigned to the new symbol must agree +exactly with where the symbol appears in the order of the enumerated type.
  • +
+Here are the places query functions appear in cwrap.c: +
    +
  • +reverse wrappers: These are the query functions that are actually called +by the Zoltan library routines when the query function was registered from +Fortran. They are just wrappers to call the registered Fortran routine, +coercing argument types as necessary. Look at Zoltan_Num_Edges_Fort_Wrapper +for an example.
  • + +
  • +Zfw_Set_Fn: This has a switch based on the value of the ZOLTAN_FN_TYPE +argument to set the Fortran query function and wrapper in the Zoltan_Struct.
  • +
+In zz/zz_const.h, if a new field is added to the structures for +a new query function, it should be added in both C and Fortran forms. In +include/zoltan.h, if a new typedef for a query function is added, +it should be added in both C and Fortran forms. See these files for examples. +

+Enumerated types and defined constants

+Enumerated types and defined constants that the application uses as the +value for an argument must be placed in fwrap.fpp with the same +value. See ZOLTAN_OK +for an example.  +
+
[Table of Contents  |  Next:  +C++ Interface  |  Previous:  +Migration Tools  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_hier.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_hier.html new file mode 100644 index 00000000..64db15be --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_hier.html @@ -0,0 +1,133 @@ + + + + + + + Zoltan Developer's Guide: HIER + + + + + + +

+Appendix: Hierarchical Partitioning (HIER)

+  + +

+Outline of Algorithm

+ +

Zoltan's hierarchical balancing automates the creation of hierarchical +partitions. The implementation of Hierarchical Partitioning (HIER) in Zoltan is +due to Jim Teresco (Williams College), with many suggestions and ideas +from the entire Zoltan team.

+ +The hierarchical balancing implementation utilizes a lightweight +intermediate structure and a set of callback functions that permit an +automated and efficient hierarchical balancing which can use any of +the procedures available within Zoltan without modification and in any +combination. Hierarchical balancing is invoked by an application the +same way as other Zoltan procedures. Since Zoltan is data-structure +neutral, it operates on generic "objects" and interfaces with +applications through callback functions.

+ +A hierarchical balancing step begins by building an intermediate +structure using the application-supplied callbacks. The initial +structure is computed using Zoltan_Build_Graph to build the +same structure that is used to support partitioning using the Parmetis +and Jostle libraries. The hierarchical balancing procedure then +provides its own callback functions to allow existing Zoltan +procedures to be used to query and update the intermediate structure +at each level of a hierarchical balancing. The structure is augmented +to keep track of objects and their needed information during the +procedure. After all levels of the hierarchical balancing have been +completed, Zoltan's usual migration arrays are constructed and +returned to the application. Thus, only lightweight objects are +migrated internally between levels, not the (larger and more costly) +application data. More details about the fields of the intermediate +structure and how they are used can be found in the Zoltan distribution +in hier/README.
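+As a minimal, hedged example of "invoked the same way as other Zoltan
+procedures": an application selects hierarchical balancing simply by setting
+the LB_METHOD parameter (the usual object callbacks and the hierarchy-description
+callbacks documented in the User's Guide must also be registered, which is not
+shown here):
+
+  #include "zoltan.h"
+
+  void select_hier(struct Zoltan_Struct *zz)
+  {
+    /* Choose hierarchical balancing; subsequent calls to
+     * Zoltan_LB_Partition will use the HIER method. */
+    Zoltan_Set_Param(zz, "LB_METHOD", "HIER");
+  }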

+ +
  + +

+Data Structure Definitions

+ +

There is one major data structure in HIER, defined in +hier/hier.h. The struct HierPartParamsStruct includes +all information needed by the hierarchical balancing procedures. The +fields are documented in comments in the structure definition. + +
  + +

+Parameters

+ +

The parameters used by HIER and their default values are described in the +HIER section of the Zoltan User's +Guide. These can be set by use of the Zoltan_Hier_Set_Param subroutine +in the file hier/hier.c. + +
  + +

+Main Routine

+ +

The main routine for HIER is Zoltan_Hier in the file hier/hier.c. + +
  +
  +
  + +

+


+
[Table of +Contents  |  Next:  +Recursive Inertial Bisection (RIB)  |  +Previous:  Using the Test Script  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_hsfc.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_hsfc.html new file mode 100644 index 00000000..b54d59f7 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_hsfc.html @@ -0,0 +1,298 @@ + + + + + + + + + Zoltan Developer's Guide: HSFC + + + + + +

+Appendix: Hilbert Space Filling Curve (HSFC)

+  + +

+Outline of Algorithm

+ +This partitioning algorithm is loosely based on the 2D & 3D Hilbert tables used in Octree +and on the BSFC partitioning implementation by Andrew C. Bauer, Department of +Engineering, State University of New York at Buffalo, as his summer project at +SNL in 2001. Please refer to the corresponding section in the Zoltan User's guide, +Hilbert Space Filling Curve (HSFC), +for information about how to use this module and its parameters. Note: the partitioning, +point assign and box assign functions in this code module can be trivially extended +to any space filling curve for which we have a state table definition of the curve. +

+First, the weights and inverse Hilbert coordinates for each object +are determined. If the objects do not have weights, unit weights are assigned. +If the objects have multiple weights, only the first weight is currently used. The smallest +axis-aligned box is found that contains +all of the objects using their two or three dimensional spatial coordinates. +This bounding box is slightly expanded to ensure that all objects are strictly +interior to the boundary surface. The bounding box is necessary in order to calculate +the inverse Hilbert Space Filling curve coordinate. The bounding box is used to +scale the problem coordinates into the [0,1]^d unit volume (d represents the number of dimensions +in the problem space.) The inverse Hilbert +coordinate is calculated and stored as a double precision floating point value for +each object. This code works on problems with one, two or three dimensions (the +1-D Inverse Hilbert coordinate is simply the problem coordinate itself, after the +bounding box scaling.) +
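+A small sketch of the scaling step just described (the expansion factor used
+here is arbitrary; the real code chooses its own slight expansion of the
+bounding box):
+
+  /* Map one object's coordinates into [0,1]^d using the (slightly
+   * expanded) bounding box [lo, hi] in each dimension. */
+  void scale_to_unit_cube(int dim, const double *lo, const double *hi,
+                          const double *coord, double *scaled)
+  {
+    int d;
+    for (d = 0; d < dim; d++) {
+      double extent = (hi[d] - lo[d]) * 1.0000001;   /* expanded box width */
+      scaled[d] = (extent > 0.0) ? (coord[d] - lo[d]) / extent : 0.0;
+    }
+  }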

+The algorithm seeks to cut the unit interval into P segments containing equal +weights of objects associated to the segments by their inverse Hilbert coordinates. +The code allows a user vector to specify the desired fraction +of the total weight to be assigned to each interval. Note, a zero weight fraction prevents any object +from being assigned to the corresponding interval. The unit interval is divided into N bins, +N=k(P-1)+1, where k is a +small positive constant. Each bin has a left and a right endpoint +specifying the half-open interval [l,r) associated with the bin. The bins form a +non-overlapping cover of [0,1] with the right endpoint of the last bin forced to include 1. +The bins are of equal size on the first loop. (Hence each interval or part of the +partition is a collection of bins.)

+For each loop, an MPI_Allreduce call is made to +globally sum the weights in each bin. This call also determines the maximum and +minimum (inverse Hilbert) coordinate found in each bin. A greedy algorithm sums the +weights of the bins from left to right until the next bin would cause an overflow for +the current part. This results in a new partition of P intervals. The location of +each cut (just before an "overflowing" bin) and the size of its "overflowing" bin are +saved. The "overflowing" bin's maximum and minimum are compared to determine if the bin +can be practically subdivided. (If the bin's maximum and minimum coordinates are too +close relative to double precision resolution, the bin cannot be practically +subdivided.) If at least one bin can be further refined, then looping will continue. +In order to prevent a systematic bias, the greedy algorithm is assumed to exactly +satisfy the weight required by each part.
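+A hedged sketch of the greedy sweep described above (array names and the
+handling of the overflowing bin are simplified relative to the actual hsfc code):
+
+  /* Accumulate bin weights left to right; a cut is recorded just before
+   * the bin that would overflow the current part's target weight.  The
+   * overflowing bin itself is refined on the next HSFC loop. */
+  void place_cuts(int nbins, const double *bin_wgt,
+                  int nparts, const double *target, int *cut_bin)
+  {
+    int part = 0, bin = 0;
+    double sum = 0.0;
+    while (bin < nbins && part < nparts - 1) {
+      if (sum + bin_wgt[bin] > target[part]) {
+        cut_bin[part++] = bin;     /* cut falls just before this bin      */
+        sum = 0.0;                 /* start accumulating the next part    */
+      }
+      else {
+        sum += bin_wgt[bin++];     /* bin fits entirely in the current part */
+      }
+    }
+  }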

+Before starting the next loop, the P intervals are again divided into N bins. The +P-1 "overflow" bins are each subdivided into k-1 equal bins. The +intervals before and after these new bins determine the remaining bins. This process +maintains a fixed number of bins. No bin is "privileged." Specifically, any bin is +subject to later refinement, as necessary, on future loops. +

+The loop terminates when there is no need to further divide any "overflow" bin. A slightly +different greedy algorithm is used to determine the final partition of P intervals from the +N bins. In this case, when the next bin would cause an overflow, the tolerance +is computed for both underfilling (excluding this last bin) and overfilling +(including the last bin). The tolerance closest to the target tolerance is +used to select the dividing point. The tolerance obtained at each dividing +point is compared to the user's specified tolerance. An error is returned if +the user's tolerance is violated at any cut. After each cut is made, a +correction is calculated as the ratio of the actual weight to the target +weight used up to this point. This correction is made to the target weight +for the next part. This correction fixes the subsequent parts when +a "massive" weight object is on the border of a cut and its assignment creates an +excessive imbalance.

+Generally, the number of loops is small (proportional to log(number of objects)). +A maximum of MAX_LOOPS is +used to prevent an infinite looping condition. A user-defined +function is used in the MPI_Allreduce call in order to simultaneously determine the +sum, maximum, and minimum of each bin. The message length in the MPI_Allreduce is +proportional to P, the number of parts.

+Note, when a bin is encountered that satisfies more than two parts, that bin is refined +into a multiple of k-1 intervals which maintains a total of N bins. +
  +

Hilbert Transformations

+The HSFC now uses table-driven logic to convert from spatial coordinates (2 or 3 dimensions) +into the unit interval (the Inverse Hilbert functions) and from the unit interval into spatial coordinates +(the Hilbert functions). In each case there are two associated tables: the data table and the +state table. In all cases, the table logic can be extended to any required precision. Currently, +the precision is determined for compatibility with the double precision used in +the partitioning algorithm.

The inverse transformation is computed by taking the highest order bit from each spatial +coordinate and packing them together as 2 or 3 bits (as appropriate to the dimensionality) +in the order xyz (or xy) where x is the highest bit in the word. +The initial state is 0. The data table lookup finds the value +at the column indexed by the xyz word and the row 0 (corresponding to the initial state value.) +This data are the 3 (or 2) starting bits of the Hilbert coordinate. The next state value +is found by looking up the corresponding element of the state table (xyz column and row 0.) +

+The table procedure continues to loop (using loop counter i, for example) until the required +precision is reached. At loop i, the ith bits from each spatial dimension are packed together +as the xyz column index. The data table lookup finds the element at column xyz and the row +determined by the last state table value. This is appended to the Hilbert coordinate. The +state table is used to find the next state value at the element corresponding to the xyz +column and row equal to the last state value. +
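+The loop just described amounts to the following table-driven sketch for the 2-d
+case (the table contents below are placeholders; the real data and state tables
+encode the Hilbert state-transition diagram):
+
+  static const unsigned data2d[4][4]  = {{0}};   /* placeholder data table  */
+  static const unsigned state2d[4][4] = {{0}};   /* placeholder state table */
+
+  /* x and y hold fixed-point coordinates with `bits` significant bits,
+   * most significant bit first; the packed Hilbert key is returned. */
+  unsigned long long inverse_hilbert_2d(unsigned x, unsigned y, int bits)
+  {
+    unsigned state = 0;                     /* initial state is row 0 */
+    unsigned long long key = 0;
+    int i;
+    for (i = bits - 1; i >= 0; i--) {
+      unsigned col = (((x >> i) & 1u) << 1) | ((y >> i) & 1u);  /* pack xy bits */
+      key = (key << 2) | data2d[state][col];   /* append 2 key bits         */
+      state = state2d[state][col];             /* advance the state machine */
+    }
+    return key;
+  }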

+The Hilbert transformation (from the Hilbert coordinate back to spatial coordinates) is analogous. Here the 3 (or 2 in the 2-d case) bits of the +Hilbert coordinate are extracted into a word. This word is the column index into the +data table and the state value is the row. The word found in the data table is +interpreted as the packed xyz bits for the spatial coordinates. These bits are +extracted for each dimension and appended to that dimension's coordinate. The corresponding +state table is used to find the next row (state) used in the next loop.
  +

Point Assign

+The user can use +Zoltan_LB_Point_Assign +to add a new point to the +appropriate part. The bounding box coordinates, +the final partition, and other related information are maintained after partitioning if the KEEP_CUTS +parameter is set by the user. The KEEP_CUTS parameter must be set by the user for Point Assign! +The extended bounding box is +used to compute the new point's inverse Hilbert coordinate. Then the +routine performs a binary search on the final partition to determine the part (interval) which +includes the point. The routine returns the part number assigned to that +interval.

+The Point Assign function now works for any point in space, even if the point is +outside the original bounding box. If the point is outside the bounding box, it is first +scaled using the same equations that scale the interior points into the unit volume. +The point is projected onto the unit volume. For each spatial dimension, if the scaled +coordinate is less than zero, it is replaced by zero. If it is greater than one, it is +replaced by one. Otherwise the scaled coordinate is directly used.
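+A minimal usage sketch (KEEP_CUTS must be set before partitioning, as stated
+above; whether the output is reported as a processor or a part number depends on
+the variant used, see the User's Guide; error checking is omitted):
+
+  #include "zoltan.h"
+
+  void where_is_point(struct Zoltan_Struct *zz, double xyz[3])
+  {
+    int proc;
+    Zoltan_Set_Param(zz, "KEEP_CUTS", "1");   /* set before Zoltan_LB_Partition */
+    /* ... Zoltan_LB_Partition(zz, ...) runs here ... */
+    Zoltan_LB_Point_Assign(zz, xyz, &proc);   /* owner of the new point */
+  }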
  +

Box Assign

+The user can use +Zoltan_LB_Box_Assign +to determine the parts whose +corresponding subdomains intersect the user's query box. +Although very different in implementation, the papers by Lawder and King ("Querying Multi- +dimensional Data Indexed Using the Hilbert Space-Filling Curve", 2000, etc.) were the original +inspiration for this algorithm. The Zoltan_HSFC_Box_Assign routine primarily scales the +user query region and determines its intersection with the Hilbert curve's bounding box. Points +exterior to the bounding box get projected along the coordinate axis onto the bounding box. +A fuzzy region is built around query points and lines to create the boxes required for the search. +It also handles the trivial one-dimensional case. Otherwise it repeatedly calls the 2d and 3d +query functions using the next highest part's left end point to start the search. These query +routines return the next point on the Hilbert curve to enter the query region. A binary +search finds the part associated with this point. The query functions are called one more +time than the number of parts that have points interior to the query region.

+The query functions decompose the unit square (or cube) level by level like the Octree method. +Each level divides the remaining region into quadrants (or octants in 3d). At each level, the +quadrant with the smallest inverse Hilbert coordinate (that is, occurring first along the Hilbert curve) +whose inverse Hilbert coordinate is equal to or larger than the starting inverse Hilbert coordinate and which +intersects the query region is selected. Thus, each level calculates the next 2 bits +(3 bits in 3d) of the inverse Hilbert coordinate of the next point to enter the query region. No more +than once per call to the query function, the function may backtrack to a nearest previous +level that has another quadrant that intersects the query region and has a higher Hilbert coordinate.

+In order to determine the intersection with the query region, the next 2 bits (3 in 3 dimensions) of +the Hilbert transformation +are also computed (by table lookup) at each level for the quadrant being tested. These bits are +compared to the bits resulting from the intersection of the query region with the region +determined by the spatial coordinates computed to the precision of the previous levels.

+If the user query box has any side (edge) that is "too small" (effectively degenerate in +some dimension), it is replaced by a minimum value and the corresponding vertex coordinates +are symmetrically expanded. This is referred to as a "fuzzy" region.

+This function requires the KEEP_CUTS parameter to be set by the user. +The Box Assign function now works for any box in space, even if it has regions outside the +original bounding box. The box vertices are scaled and projected exactly like the points +in the Point Assign function described above. However, to allow the search to use a proper +volume, projected points, lines, and planes are converted to a usable volume by the +fuzzy region process described above.
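+A minimal usage sketch of Zoltan_LB_Box_Assign (again assuming KEEP_CUTS was set
+before partitioning; the box coordinates here are arbitrary):
+
+  #include <stdlib.h>
+  #include "zoltan.h"
+
+  void who_owns_box(struct Zoltan_Struct *zz, int nprocs)
+  {
+    int *procs = (int *) malloc(nprocs * sizeof(int));
+    int numprocs = 0;
+    Zoltan_LB_Box_Assign(zz, 0.0, 0.0, 0.0,    /* xmin, ymin, zmin */
+                             1.0, 1.0, 1.0,    /* xmax, ymax, zmax */
+                         procs, &numprocs);
+    /* procs[0..numprocs-1] now lists the processors whose subdomains
+     * intersect the query box. */
+    free(procs);
+  }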

+This algorithm will work for any space filling curve. All that is necessary is to +provide the tables (derived from the curve's state transition diagram) in place of +the Hilbert Space Filling Curve tables.
  + +

Data Structure Definitions

+The data structures are defined in hsfc/hsfc.h. The objects being load balanced +are represented by the Dots structure, which holds the object's spatial coordinates, +the corresponding inverse Hilbert coordinate, the processor owning the object, +and the object's weight(s). The Partition structure holds the left and right +endpoints of the interval represented by this element of the partition and the index +to the processor owning this element of the partition. The structure HSFC_Data +holds the "persistent" data +needed by the point assign and box assign routines. This includes the bounding box, +the number of loops necessary for load balancing, the number of dimensions for the problem, +a pointer to the function that returns the inverse Hilbert Space-Filling Curve +coordinate, and the final Partition structure contents.

+ +

+Parameters

+ +

The parameters used by HSFC and their default values are described in the +HSFC section of the Zoltan User's +Guide. These can be set by use of the Zoltan_HSFC_Set_Param subroutine +in the file hsfc/hsfc.c. +

+When the parameter REDUCE_DIMENSIONS +is specified, the HSFC algorithm will perform lower dimensional +partitioning if the geometry is found to be degenerate. More information +on detecting degenerate +geometries may be found in another +section. + + +
  + +

+Main Routine

+ +

The main routine for HSFC is Zoltan_HSFC in the file hsfc/hsfc.c. + +
  +
  +
  + +

+


+
[Table of Contents  +|  Next:   Handling Degenerate Geometries + +|  Previous:  Refinement Tree  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_intro.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_intro.html new file mode 100644 index 00000000..8d96ac04 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_intro.html @@ -0,0 +1,82 @@ + + + + + + + + + + Zoltan Developer's Guide: Introduction + + + + + + + +

+Introduction and General Principles

+The goal of the Zoltan project is to design a general-purpose tool for +parallel data management for unstructured, dynamic applications.  This tool includes a suite of load-balancing +algorithms, an unstructured communication package, distributed data directories, and dynamic debugging tools that can be used by a variety of applications.  It will, thus, +be used by many application developers and be added to by many algorithm +developers.  Software projects of this scale need general guidelines +and principles so that the code produced is easily maintained and added +to.  We have tried to keep restrictions on developers to a minimum.  +However, we do require that a few coding practices be followed.  These +guidelines are described in the following sections: +
Philosophy of Zoltan +
Coding Principles in Zoltan +
Zoltan Quality Assurance +
+ +
+
[Table of Contents  |  Next:  +Philosophy of Zoltan  |  Previous:  +Table of Contents  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_intro_coding.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_intro_coding.html new file mode 100644 index 00000000..7f6b2c5e --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_intro_coding.html @@ -0,0 +1,255 @@ + + + + + + + + + + Zoltan Developer's Guide: Coding Principles + + + + + + +

+Coding Principles in Zoltan

+
+
    Include files +
    Global Variables +
    Function Names +
    Parallel Communication +
    Memory Management +
    Errors, Warnings and Return Codes +
+
+ +

+Include files

+Include files should be used for function prototypes, macro definitions, +and data structure definitions. The convention used is that external function +prototypes and data structure definitions +required by more than one module are stored in include files named +*_const.h (e.g., zz/zz_const.h). Include files with static +function prototypes or static data structure +definitions (i.e., files that are included in only one module) are +named *.h (e.g., rcb/rcb.h). + +

The include file include/zoltan.h contains the Zoltan +interface; it should be included by C application source files that call +Zoltan. C++ applications that use the C++ interface should include +include/zoltan_cpp.h instead. +

+The include file zz/zz_const.h describes +the principal Zoltan data structures. As these data structures +are used heavily by the algorithms in Zoltan, zz/zz_const.h +should be included in most source files of Zoltan.

+Every Zoltan C language header file should be surrounded with an +extern "C" {} declaration. The declaration must occur after +every other #include statement, and before all function +declarations. +This declaration tells a C++ compiler not to +mangle the names of functions declared in that header file. +

+ + + +
+ +#ifndef __EXAMPLE_H
+#define __EXAMPLE_H

+ +#include "mpi.h"
+#include "zoltan_types.h"
+#include "zoltan_align.h"

+ +#ifdef __cplusplus
+extern "C" {
+#endif

+ +int func1(int a, int b);
+double dfunc(int a, int b, int c);

+ +#ifdef __cplusplus
+} /* closing bracket for extern "C" */
+#endif

+ +#endif /* __EXAMPLE_H */
+
+
Example of C language header file with extern "C" +
+
+

+If an #include statement appears after the opening of the +extern "C" {} declaration, the included file may cause +mpi.h or some other system header file to be processed. When +compiling with a C++ compiler, this usually leads to compile errors +because the function names in some of those headers are supposed to be +mangled. +

+It should +not be necessary to use the declaration in all header files, but +rather only in header files that are used in C++ applications. But +experience has taught us that you never know which header files will +end up being included, and a header that is not included now may be +included in the future when someone adds an #include statement +to a file. To save someone the effort of later figuring out +why their C++ compilation is failing, please include the +extern "C" {} declaration in every header file, even if at +this point in time you do not believe it will ever be included in +the compilation of a C++ application.
  +

+Global variables

+The use of global variables is highly discouraged in Zoltan. +In limited cases, static global variables can be tolerated within +a source file of an algorithm. However, developers should keep in mind +that several Zoltan structures may be used by an application, with +each structure using the same algorithm. Thus, global variables set by one +invocation of a routine may be reset by other invocations, causing errors +in the algorithms. Global variable names may also conflict with variables +used elsewhere in the library or application, causing unintended side-effects +and complicating debugging. For greatest robustness, developers are asked +NOT to use global variables in their algorithms. See Data +Structures for ideas on avoiding the use of global variables. +
  +

+Function Names

+In order to avoid name conflicts with applications and other libraries, +all non-static functions should be prepended with Zoltan_.  +Moreover, function names should, in general, include their module names; e.g., +Zoltan_HSFC_Box_Assign is part of the HSFC module of Zoltan. +As a general +rule, each new word in a function name should be capitalized (for example, +Zoltan_Invert_Lists). +Static Zoltan functions do not have to follow these rules. + +

+Parallel Communication

+All communication in the Zoltan library should be performed through MPI +communication routines. The MPI interface was chosen to enable portability +to many different platforms. It will be especially important as the code +is extended to heterogeneous computing systems. + +

Some useful communication utilities are provided within the library +to perform unstructured communication and synchronization. See Unstructured +Communication Utilities and Parallel +Computing. +
  +

+Memory Management

+It is strongly suggested that all memory allocation in the library is handled +using the functions supplied in Utilities/Memory. Use of these functions +will make debugging and maintenance of the library much easier as the library +gets larger. See Memory Management Utilities for +more information on these utilities.

+ +For memory that is returned by Zoltan to an application, however, special +memory allocation functions must be used to maintain compatibility with +both C and Fortran90 applications. See +Memory Management in Zoltan Algorithms for more +information.

+ +One of the few data types specified for use in the Zoltan interface is the +ZOLTAN_ID_PTR +type used for global and local object identifiers (IDs). Macros simplifying +and providing error checking for +ID allocation and manipulation +are provided. + +
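+A short, hedged sketch of the preferred allocation pattern (the macro names
+below come from the memory utilities; the helper function and its arguments are
+invented for illustration):
+
+  #include "zoltan_types.h"   /* ZOLTAN_OK, ZOLTAN_MEMERR, ZOLTAN_ID_PTR */
+  #include "zoltan_mem.h"     /* ZOLTAN_MALLOC, ZOLTAN_FREE              */
+
+  int build_workspace(int n, double **wgts)
+  {
+    *wgts = (double *) ZOLTAN_MALLOC(n * sizeof(double));
+    if (*wgts == NULL)
+      return ZOLTAN_MEMERR;          /* report the error; do not exit     */
+    /* ... use the array ... */
+    ZOLTAN_FREE(wgts);               /* takes the address of the pointer  */
+    return ZOLTAN_OK;
+  }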
  +

+Errors, Warnings, and Return Codes

+If an error or warning occurs in the Zoltan library, +a message should be printed to +stderr (using one of the printing macros below), +all memory allocated in the current function should be freed, and +an error code should be returned. +The Zoltan library should never "exit"; +control should always be returned to the application with an error code. +The error codes are defined in include/zoltan_types.h. +

+Currently, this philosophy is not strictly followed in all portions +of Zoltan. +Efforts are underway to bring existing code up-to-date, and to follow this +rule in all future development. +
  +


+ZOLTAN_PRINT_ERROR(int processor_number, char *function_name, char *message)
+ZOLTAN_PRINT_WARN(int processor_number, char *function_name, char *message)
+
+Macros for printing error and warning messages in Zoltan. The macros are +defined in Utilities/shared/zoltan_util.h. + + + + + + + + + + + + + + + + + +
Arguments:
    processor_number The processor's rank in the Zoltan communicator. The value -1 can be used if the rank is not available.
    function_name A string containing the name of the function in which the error or warning occurred.
    message A string containing the error or warning message.
+
+

+ +
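+A hedged sketch of the convention described above (the routine and variable
+names are invented; the macros and error codes are those named in this section):
+
+  #include "zoltan_types.h"   /* ZOLTAN_OK, ZOLTAN_MEMERR   */
+  #include "zoltan_util.h"    /* ZOLTAN_PRINT_ERROR         */
+  #include "zoltan_mem.h"     /* ZOLTAN_MALLOC, ZOLTAN_FREE */
+
+  int example_routine(int proc, int n)
+  {
+    char yo[] = "example_routine";   /* function name for messages */
+    int *tmp = (int *) ZOLTAN_MALLOC(n * sizeof(int));
+    if (tmp == NULL) {
+      ZOLTAN_PRINT_ERROR(proc, yo, "Insufficient memory.");
+      return ZOLTAN_MEMERR;          /* return an error code, never exit */
+    }
+    /* ... work that may allocate more memory ... */
+    ZOLTAN_FREE(&tmp);               /* free everything allocated here   */
+    return ZOLTAN_OK;
+  }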


+
[Table of Contents  |  Next:  +Zoltan Quality Assurance  |  Previous:  +Philosophy  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_intro_philosophy.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_intro_philosophy.html new file mode 100644 index 00000000..b73b2b24 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_intro_philosophy.html @@ -0,0 +1,91 @@ + + + + + + + + + Zoltan Developer's Guide: Philosophy + + + + + + +

+Philosophy of Zoltan

+The Zoltan library is designed to be a general-purpose tool-kit +providing a variety of parallel data management services to a wide range +of scientific applications (see the Zoltan +User's Guide). To enable general use of the library, the library does +not directly access the data structures of an application. Instead, the +library obtains information it needs through +an object-oriented interface between Zoltan and the +application. +This interface uses call-back query functions to gather information. An +application developer must write and register these query functions before +using Zoltan. The intent, however, is that the number +and complexity of these query functions are low, allowing applications +to easily interface with the library. In addition, new algorithm +development would use the same query functions as previous algorithms, +enabling applications to use new algorithms without changes to the query +functions. + +

In developing new algorithms for Zoltan, the developer +must write the code that calls the query functions to build the needed +data structures for the algorithm. However, the application need not change +its query functions. Thus, new algorithms can be added to the library and +used by an application with minimal effort on the part of the application +developer. +

+


+
[Table of Contents  |  Next:  +Coding Principles  | Previous:  +Introduction  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_intro_sqe.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_intro_sqe.html new file mode 100644 index 00000000..dca3acfb --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_intro_sqe.html @@ -0,0 +1,389 @@ + + + + + + + + + + Zoltan Developer's Guide: Quality Program + + + + + + +

+Zoltan Quality Assurance

+This document describes the Software Quality Assurance (SQA) +policies and procedures used in the Zoltan project. Zoltan developers +at Sandia or under contract to Sandia are required to follow these +software development policies. + + + +
+Quality Policy +
Quality Definition +
Classification of Defects +
Release Policy +
Software Quality Tools +
Software Quality Processes +
Zoltan's implementation of the ASC Software Quality Engineering Practices +
+ + + +

+Quality Policy +

+

+Sandia's ASC Quality Management Council (AQMC) developed and manages the +Quality Assurance Program (QAP) for Sandia's ASC program. The AQMC chartered +the development of the Sandia National Laboratories Advanced Simulation and Computing +(ASC) Software Quality Plan, Part 1: ASC Software Quality Engineering Practices, Version 2.0 +document (SAND 2006-5998) as the practical SQA guidance for projects like Zoltan. +A companion document, Sandia National Laboratories Advanced Simulation and Computing +(ASC) Software Quality Plan, Part 2: Mappings for the ASC Software Quality Practices (SAND 2006-5997), +shows how these practices satisfy corporate policies including CPR001.3.6, Corporate +Software Engineering Excellence, and DOE/NNSA orders 414.1C and QC-1 rev 10.

+The Zoltan project is committed to a program of quality improvement in compliance +with the ASC Software Quality Engineering Practices document. The Zoltan Team Leader is the owner +of the Zoltan quality system. Zoltan developers at Sandia or under contract to Sandia +are required to follow these software development practices. The Zoltan team shall +participate in all reporting processes, audits, and assessments as directed by the AQMC. + +

Quality Definition

+

+QC-1 rev 10 defines quality as "the degree to which customer requirements are met." +

+The Zoltan project accepts the following definition of quality: +"the totality of characteristics of a product or service that bear +on its ability to satisfy stated or implied needs." This is Juran's +"fitness for use" definition of quality (ANSI/ASQC A8402-1994.) +This superior definition of quality fully satisfies the QC-1 rev 10 definition. +This definition is also more useful in a research environment where the requirements are +derived from a research proposal rather than directly from customers and end users.

Classification of Defects

+The Zoltan project accepts the following system of classification of +defects: +
+Critical: A defect that could lead to loss of life, +significant environmental damage, or substantial financial loss. +
Major: A non critical defect that significantly +impacts Zoltan's fitness for use. +
Minor: A (non critical, non major) defect that +reasonably impacts Zoltan's fitness for use. +
Incidental: Any other defect which does not +reasonably reduce Zoltan's fitness for use. +
+

+ +

Release Policy

+

+Only the Zoltan team leader may authorize (certify) a release. +The Zoltan team leader shall not release software with +any known critical or major defects. +User registration shall allow the Zoltan team to +notify all Sandia and ASC users and to recall their +defective software if a critical or major defect +is discovered after release. +The Zoltan team leader may determine that it is +acceptable to release software with known minor or incidental defects. +

+ +

Software Quality Tools

+

+Because of the small scale of the Zoltan Project, only a few, simple tools +are required for use by Zoltan developers: +

+CVS: maintains code, documentation, meeting +notes, emails, and QA program artifacts; +
Purify, PureCoverage, Quantify (Rational), Valgrind, gdb: +for dynamic code testing, coverage measurements, and performance analysis; +
Bugzilla: tracks bugs, requests for changes, +and enhancements; +
Mailman: creates email lists to automatically +notify users by area(s) of interest; +
Makefiles: ensures proper compilation and linking +for all supported platforms; and +
Zoltan Test Script: runs +integration, regression, release and acceptance testing. +
+ +

Software Quality Processes

+

+Bug Reporting, Issue Tracking, Enhancement Requests: All of these +items are now directly entered into Bugzilla by developers and users. +This "process" is built into the tool. Detailed instructions +for using Bugzilla are found on the Zoltan web page. Bugzilla also +provides query and report features for tracking the status of entered items.

+A process is defined as a sequence of steps performed for a given +purpose (IEEE Std. 610.12.) Zoltan's other processes are defined as +checklists because checklists are one of the seven fundamental quality tools. +These checklists are also the primary artifact created when following a process. +Currently the following processes are defined:

+Development: (not currently used) defines the software development +process including +requirements, design, implementation, testing, reviews, and approvals; +
Release: defines the release process including testing requirements +and creation of the release product; +
Request: defines the process of +capturing user requests for new features; +
  Note: this process is now obsolete. Request processes in progress +may continue until complete but new requests should use Bugzilla; +
Requirement: the process of capturing user comments that +may become requirements after review and approval; +
  Note: this process is now obsolete. Requirement processes in progress +may continue until complete but new requirements should use Bugzilla; +
Review: defines the materials reviewed prior to acceptance +for Zoltan release; +
   Note: Developers are encouraged to use Bugzilla to enter the +specific review process rather than use the Review checklist. At this time this +is a trial effort and either method may be used. +
Third Party Software: defines the steps required to obtain, manage, +use, and test software created outside of Zoltan and the ASC program; and +
Training: defines the material a new developer must read, required +skills to demonstrate and computer accounts that must be obtained. +
+ +

+Zoltan's software quality process checklists define how work may be performed, +including process ownership, authorization to perform, activities and their +sequence (when sequencing is required), process instructions, metrics, and +identification of who performed each activity. +

+The only allowed source for process checklists is Zoltan's CVS repository +in the SQA_templates directory (under Zoltan_Internal.) A Zoltan developer +initiates a process by obtaining the current CVS version of the process, renaming +it, and committing the renamed process checklist back into CVS in an appropriate +directory on the same day. The process may continue under this committed version +even if its original process is later superseded unless specifically requested by +the Team Leader. After one or more activities are completed, the process +checklist is updated to reflect the results and committed back to CVS (with +appropriate comments.) A process is completed when all required activities +are completed including reviews and approvals (as necessary), and committed to CVS. +The final CVS comment should indicate that the process is complete. +

+

+Zoltan's implementation of the ASC Software Quality Engineering Practices

+

+The following is a brief description for Zoltan developers about the Zoltan +project's implementation of the ASC Software Quality Engineering Practices (SAND 2006-5998):

+PR1. Document and maintain a strategic plan.
+The Zoltan web page has a direct hyperlink to the Zoltan Project Description +defining its mission and philosophy. The Zoltan project has a strong association +with the Trilinos project to share in the development of common software +engineering practices and sharing of appropriate tools and experience. +

+PR2. Perform a risk-based assessment, determine level of formality +and applicable practices, and obtain approvals.
+The Zoltan project has an approved level of formality (medium) for its +deliverable software. Its biggest technical risk results from providing +parallel solutions to NP hard partitioning problems. Technical risks are +mitigated by collaborations within Sandia and internationally. The most +significant non-technical risk is the conflicting priorities of Zoltan +developers working on many other projects simultaneously. +

+PR3. Document lifecycle processes and their interdependencies, and +obtain approvals.
+The Zoltan project follows the Trilinos Software Lifecycle Model +(SAND 2006-6929). It also follows the ANSI/ASQ Z1.13-1999 standard +Quality Guidelines for Research which is compatible with the research +phase in the Trilinos Lifecycle model. +

+PR4. Define, collect, and monitor appropriate process metrics.
+The Zoltan project is committed to complying fully with the new and evolving +AQMC requirements for collecting and reporting "defect" metrics. +Other metrics determined by Zoltan's continual process improvement process +(PR 5) will be implemented.

+PR5. Periodically evaluate quality problems and implement process +improvements.
+The Zoltan project has built the Deming/Shewhart process improvement +cycle PDCA (Plan, Do, Check, Act) into all of its process checklists. This is +the most effective process improvement technique known. It is recommended +by ISO 9001:2000. +

+PR6. Identify stakeholders and other requirements sources.
+The Zoltan project's primary stakeholders are the ASC applications using +Zoltan, including SIERRA, ACME, ALEGRA/NEVADA, XYCE, and Trilinos.

+PR7. Gather and manage stakeholders´ expectations and requirements.
+The Zoltan project's primary input on ASC applications' expectations and +requirements comes via their communication of Zoltan's role in meeting their +ASC milestones. Since Zoltan is an "enabling technology," these requirements +are broadly stated performance improvement needs. The Zoltan team actively anticipates +and develops load balancing software for the future needs of the Sandia research community +before they actually become formal requirements.

+PR8. Derive, negotiate, manage, and trace requirements.
+Zoltan project requirements normally derive from its funded research proposals +which state research goals. This is a normal procedure in a research +environment (see ANSI/ASQ Z1.13-1999). Periodic and final reports document +the success in meeting these research goals. +

+PR9. Identify and analyze risk events.
+All Zoltan developers should report any new or changed risks via the zoltan-dev +email target for evaluation by the Team Lead. +

+PR10. Define, monitor, and implement the risk response.
+The Zoltan team will create a corrective action plan whenever any condition +threatens to adversely impact the Zoltan project resources or schedule. +

+PR11. Create and manage the project plan.
+ANSI/ASQ Z1.13-1999 states that the research proposal is equivalent to a +project plan in a research environment. The Team Leader assigns responsibilities, +deliverables, resources, and schedules in order to manage the project. +

+PR12. Track project performance versus project plan and implement +needed (corrective) actions.
+The Team Leader periodically tracks responsibilities, deliverables, resources, +and schedules in order to manage the project. +

+PR13. Communicate and review design.
+The Zoltan architecture is fully documented in the Zoltan Developer's Guide. +New features are originally documented and reviewed in team discussions on +the zoltan-dev email target. Prior to release, the design documentation is +finalized in both the Zoltan Developer's Guide and the Zoltan User's Guide.

+PR14. Create required software and product documentation.
+Developers will follow the Zoltan Development Process Checklist. +

+PR15. Identify and track third party software products and follow +applicable agreements.
+Developers will follow the Zoltan Third Party Software Process Checklist. +

+PR16. Identify, accept ownership, and manage the assimilation of other +software products.
+Not applicable since Zoltan does not "assimilate" third party software. +

+PR17. Perform version control of identified software product artifacts.
+All software and process artifacts are maintained under CVS as early as is reasonable +after their creation.

+PR18. Record and track issues associated with the software product.
+Developers will use Bugzilla to record and track issues. +

+PR19. Ensure backup and disaster recovery of software product artifacts.
+Nightly backups, periodic offsite backups, and disaster recovery are services +provided by the CSRI computer support staff. Disaster recovery has been successfully +performed from real problems. +

+PR20. Plan and generate the release package.
+Developers will follow the Zoltan Release Process Checklist. +

+PR21. Certify that the software product (code and its related artifacts) is +ready for release and distribution.
+The Zoltan Team Leader will certify any version of Zoltan for release via an +email to zoltan-dev target. +

+PR22. Distribute release to customers.
+Zoltan files are released via a download from the Zoltan web site. The Zoltan +Team Leader will make the download available after certification. (Research +versions of the Zoltan software are directly available to collaborators for +development.) +

+PR23. Define and implement a customer support plan.
+(See PR 6 for a list of ASC stakeholders.) The Zoltan team provides one-on-one +training whenever requested and quickly responds to any user complaint. +

+PR24. Implement the training identified in the customer support plan.
+See PR 23 above. If additional training is ever requested, the Zoltan project +will piggy back on the annual Trilinos Users Group meeting with a training +session on using Zoltan. +

+PR25. Evaluate customer feedback to determine customer satisfaction.
+

+

+PR26. Develop and maintain a software verification plan.
+Developers are expected to create new tests for the Zoltan test suite when +new features are added to Zoltan. +

+Currently, a new test framework based on FAST/EXACT is being implemented. +Documentation about this test framework is under preparation. A process +checklist will be developed around the steps required to add new tests to +the suite and to run the suite. +

+PR27. Conduct tests to demonstrate that acceptance criteria are met and to +ensure that previously tested capabilities continue to perform as expected.
+This practice is a subset of the Zoltan Release Process Checklist. +

+PR28. Conduct independent technical reviews to evaluate adequacy with respect +to requirements.
+Developers will follow the Zoltan Review Process Checklist. ANSI/ASQ Z1.13-1999 +states that the peer reviewed publications and conference presentations are a normal +form of technical review in the research environment. +

+PR29. Determine project team training needed to fulfill assigned roles +and responsibilities.
+New developers will follow the Zoltan Training Process for new team members. +

+PR30. Track training undertaken by project teams.
+Zoltan developers are encouraged to participate in the annual Trilinos Users Group +(TUG) meeting, which provides sessions for SQA/SQE training to developers. +Attendance records are kept for this event and for any Zoltan team meetings that +provide training. Sandia provides many other opportunities for training including +formal courses and periodic internal software developers conferences. External +conferences (e.g., IPDPS and SIAM) are counted as technical training.

+


+
[Table of Contents | Next: +Zoltan Distribution | Previous: +Coding Principles in Zoltan +| Privacy and Security] + + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_lb.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_lb.html new file mode 100644 index 00000000..8e7d8f05 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_lb.html @@ -0,0 +1,78 @@ + + + + + + + + + + Zoltan Developer's Guide: Load-Balancing + + + + + + + +

+Zoltan Interface and Data Structures

+The interface functions, data types and data structures +for the Zoltan library are described in the following sections: +
Interface Functions  +(files defining the interface) +
ID Data Types (descriptions +of data types used for global and local identifiers) +
Data Structures (Zoltan data structures +for storing information registered by an application) +
 
+ +
+
[Table of Contents  |  Next:  +Interface Functions  |  Previous:  +Compilation  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_lb_interface.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_lb_interface.html new file mode 100644 index 00000000..cb29cb6b --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_lb_interface.html @@ -0,0 +1,104 @@ + + + + + + + + + + Zoltan Developer's Guide: Load-Balancing Interface + + + + + + + +

+Interface Functions

+The interface to the Zoltan library is defined in the file +include/zoltan.h. This file should be included in application programs +that use Zoltan. It is also included in zz/zz_const.h, +which should be included by most Zoltan files to provide +access to the Zoltan data structures described below. + +

In include/zoltan.h, the enumerated type ZOLTAN_FN_TYPE defines +the application query function types (e.g., ZOLTAN_NUM_OBJ_FN_TYPE +and ZOLTAN_OBJ_LIST_FN_TYPE). +The interface query routines (e.g., ZOLTAN_NUM_OBJ_FN +and ZOLTAN_OBJ_LIST_FN) +and their argument lists are defined as C type definitions (typedef). These +type definitions are used by the application developer to implement the +query functions needed for the application to use Zoltan. + +

Prototypes for the Zoltan interface functions +(e.g., Zoltan_LB_Partition +and Zoltan_Migrate) +are also included in include/zoltan.h. Interface functions are called +by the application to register functions, select a load-balancing method, +invoke load balancing and migrate data. + +
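+For example, an application might implement a query function matching the
+ZOLTAN_NUM_OBJ_FN typedef and register it with Zoltan_Set_Fn (a minimal sketch;
+the application data pointer here is simply an int count):
+
+  #include "zoltan.h"
+
+  static int my_num_obj(void *data, int *ierr)
+  {
+    *ierr = ZOLTAN_OK;
+    return *((int *) data);          /* number of objects owned locally */
+  }
+
+  void register_queries(struct Zoltan_Struct *zz, int *my_count)
+  {
+    Zoltan_Set_Fn(zz, ZOLTAN_NUM_OBJ_FN_TYPE,
+                  (void (*)()) my_num_obj, (void *) my_count);
+  }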

The interface to the C++ version of the Zoltan library is in the file +include/zoltan_cpp.h. This file defines the Zoltan class, +representing a Zoltan_Struct +data structure and the functions which +operate upon it. The conventions used to wrap C library functions as +C++ library functions are described in the chapter +C++ Interface. +A C++ program that uses Zoltan includes include/zoltan_cpp.h +instead of include/zoltan.h. + +

For more detailed information on Zoltan's query and interface functions, +please see the Zoltan User's Guide. + +

+


+
[Table of Contents  |  Next:  +ID Data Types  |  Previous:  +Zoltan Interface and Data Structures  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_lb_structs.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_lb_structs.html new file mode 100644 index 00000000..f6c06f5d --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_lb_structs.html @@ -0,0 +1,861 @@ + + + + + + + + + Zoltan Developer's Guide: Load Balancing Data Structures + + + + + + +

+Data Structures

+The Zoltan_Struct data structure is the main data structure for interfacing +between Zoltan and the application. The application +creates a Zoltan_Struct data structure through a call to Zoltan_Create. +Fields of the data structure are then set through calls from the application +to interface routines such as Zoltan_Set_Param +and Zoltan_Set_Fn. +The fields of the Zoltan_Struct data structure are listed and described +in the table below. See the Zoltan +User's Guide for descriptions of the function types used in the Zoltan_Struct.

A Zoltan_Struct data structure zz is passed from the application +to Zoltan in the call to Zoltan_LB_Partition. +This data structure is passed to the individual load-balancing routines. +The zz->LB.Data_Structure pointer field should point to the main data +structures of the particular load-balancing algorithm so that the data +structures may be preserved for future calls to Zoltan_LB_Partition +and so that separate instances of the same load-balancing algorithm (with +different Zoltan_Struct structures) can be used by the application. +
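+A hedged sketch of this convention (the algorithm's private type and the routine
+shown are invented for illustration):
+
+  #include "zz_const.h"       /* struct Zoltan_Struct and its LB field */
+  #include "zoltan_mem.h"
+
+  struct MyMethodData { int nparts; double *cuts; };   /* private state */
+
+  static int my_method_init(struct Zoltan_Struct *zz)
+  {
+    struct MyMethodData *d =
+        (struct MyMethodData *) ZOLTAN_MALLOC(sizeof(struct MyMethodData));
+    if (d == NULL) return ZOLTAN_MEMERR;
+    d->nparts = 0;
+    d->cuts = NULL;
+    zz->LB.Data_Structure = (void *) d;   /* preserved for future calls to
+                                             Zoltan_LB_Partition */
+    return ZOLTAN_OK;
+  }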
  +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Fields of Zoltan_StructDescription
MPI_Comm CommunicatorThe MPI communicator to be used by the Zoltan structure; set +by +Zoltan_Create.
int ProcThe rank of the processor within Communicator; set in Zoltan_Create.
int Num_ProcThe number of processors in Communicator; set in Zoltan_Create.
int Num_GIDThe number of array entries used to represent a +global ID. +Set via a call to +Zoltan_Set_Param +for NUM_GID_ENTRIES. +
int Num_LIDThe number of array entries used to represent a +local ID. +Set via a call to +Zoltan_Set_Param +for NUM_LID_ENTRIES. +
int Debug_LevelA flag indicating the amount of debugging information that should be +printed by Zoltan.
int FortranA flag indicating whether or not the structure was created by a call +from Fortran.
PARAM_LIST * ParamsA linked list of string pairs. The first item in each pair is the name +of a modifiable parameter. The second string is the new value the parameter +should adopt. These string pairs are read upon invocation of a Zoltan +algorithm and the appropriate parameter changes are made. This design allows +for different Zoltan structures to have different parameter settings.
int DeterministicFlag indicating whether algorithms used should be forced to be deterministic; +used to obtain completely reproducible results.  Set via a call to +Zoltan_Set_Param +for DETERMINISTIC.
int Obj_Weight_DimNumber of weights per object. +  Set via a call to +Zoltan_Set_Param +for OBJ_WEIGHT_DIM.
int Edge_Weight_DimFor graph algorithms, number of weights per edge. +Set via a call to +Zoltan_Set_Param +for EDGE_WEIGHT_DIM.
int Timer Timer type that is currently active. +Set via a call to +Zoltan_Set_Param +for TIMER.
ZOLTAN_NUM_EDGES_FN +* Get_Num_EdgesA pointer to an application-registered function that returns the number +of edges associated with a given object. Set in Zoltan_Set_Fn +or Zoltan_Set_Num_Edges_Fn.
void *Get_Num_Edges_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_Num_Edges. Set in Zoltan_Set_Fn +or Zoltan_Set_Num_Edges_Fn.
ZOLTAN_EDGE_LIST_FN +* Get_Edge_ListA pointer to an application-registered function that returns a given +object's neighbors along its edges. Set in Zoltan_Set_Fn +or Zoltan_Set_Edge_List_Fn.
void *Get_Edge_List_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_Edge_List. Set in Zoltan_Set_Fn +or Zoltan_Set_Edge_List_Fn.
ZOLTAN_NUM_GEOM_FN +* Get_Num_GeomA pointer to an application-registered function that returns the number +of geometry values needed to describe the positions of objects. Set in +Zoltan_Set_Fn +or Zoltan_Set_Num_Geom_Fn.
void *Get_Num_Geom_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_Num_Geom. Set in Zoltan_Set_Fn +or Zoltan_Set_Num_Geom_Fn.
ZOLTAN_GEOM_FN +* Get_GeomA pointer to an application-registered function that returns a given +object's geometry information (e.g., coordinates). Set in Zoltan_Set_Fn +or Zoltan_Set_Geom_Fn.
void *Get_Geom_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_Geom. Set in Zoltan_Set_Fn +or Zoltan_Set_Geom_Fn.
ZOLTAN_NUM_OBJ_FN +* Get_Num_ObjA pointer to an application-registered function that returns the number +of objects assigned to the processor before load balancing. Set in Zoltan_Set_Fn +or Zoltan_Set_Num_Obj_Fn.
void *Get_Num_Obj_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_Num_Obj. Set in Zoltan_Set_Fn +or Zoltan_Set_Num_Obj_Fn.
ZOLTAN_OBJ_LIST_FN +* Get_Obj_ListA pointer to an application-registered function that returns arrays +of objects assigned to the processor before load balancing. Set in Zoltan_Set_Fn +or Zoltan_Set_Obj_List_Fn.
void *Get_Obj_List_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_Obj_List. Set in Zoltan_Set_Fn +or Zoltan_Set_Obj_List_Fn.
ZOLTAN_FIRST_OBJ_FN +* Get_First_ObjA pointer to an application-registered function that returns the first +object assigned to the processor before load balancing. Used with Get_Next_Obj +as an iterator over all objects. Set in Zoltan_Set_Fn +or Zoltan_Set_First_Obj_Fn.
void *Get_First_Obj_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_First_Obj. Set in Zoltan_Set_Fn +or Zoltan_Set_First_Obj_Fn.
ZOLTAN_NEXT_OBJ_FN +* Get_Next_ObjA pointer to an application-registered function that, given an object +assigned to the processor, returns the next object assigned to the processor +before load balancing. Used with Get_First_Obj as an iterator over +all objects. Set in Zoltan_Set_Fn +or Zoltan_Set_Next_Obj_Fn.
void *Get_Next_Obj_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_Next_Obj. Set in Zoltan_Set_Fn +or Zoltan_Set_Next_Obj_Fn.
ZOLTAN_NUM_BORDER_OBJ_FN +* Get_Num_Border_ObjA pointer to an application-registered function that returns the number +of objects sharing a subdomain border with a given processor. Set in Zoltan_Set_Fn +or Zoltan_Set_Num_Border_Obj_Fn.
void *Get_Num_Border_Obj_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_Num_Border_Obj. Set in Zoltan_Set_Fn +or Zoltan_Set_Num_Border_Obj_Fn.
ZOLTAN_BORDER_OBJ_LIST_FN +* Get_Border_Obj_ListA pointer to an application-registered function that returns arrays +of objects that share a subdomain border with a given processor. Set in +Zoltan_Set_Fn +or Zoltan_Set_Border_Obj_List_Fn.
void *Get_Border_Obj_List_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_Border_Obj_List. Set in Zoltan_Set_Fn +or Zoltan_Set_Border_Obj_List_Fn.
ZOLTAN_FIRST_BORDER_OBJ_FN +* Get_First_Border_ObjA pointer to an application-registered function that returns the first +object sharing a subdomain border with a given processor. Used with Get_Next_Border_Obj +as an iterator over objects along borders. Set in Zoltan_Set_Fn +or Zoltan_Set_First_Border_Obj_Fn.
void *Get_First_Border_Obj_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_First_Border_Obj. Set in Zoltan_Set_Fn +or Zoltan_Set_First_Border_Obj_Fn.
ZOLTAN_NEXT_BORDER_OBJ_FN +* Get_Next_Border_ObjA pointer to an application-registered function that, given an object, +returns the next object sharing a subdomain border with a given processor. +Used with Get_First_Border_Obj as an iterator over objects along +borders. Set in Zoltan_Set_Fn +or Zoltan_Set_Next_Border_Obj_Fn.
void *Get_Next_Border_Obj_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_Next_Border_Obj. Set in Zoltan_Set_Fn +or Zoltan_Set_Next_Border_Obj_Fn.
ZOLTAN_NUM_COARSE_OBJ_FN +* Get_Num_Coarse_ObjA pointer to an application-registered function that returns the number +of objects in the initial coarse grid. Set in Zoltan_Set_Fn +or Zoltan_Set_Num_Coarse_Obj_Fn.
void *Get_Num_Coarse_Obj_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_Num_Coarse_Obj. Set in Zoltan_Set_Fn +or Zoltan_Set_Num_Coarse_Obj_Fn.
ZOLTAN_COARSE_OBJ_LIST_FN +* Get_Coarse_Obj_ListA pointer to an application-registered function that returns arrays +of objects in the initial coarse grid. Set in Zoltan_Set_Fn +or Zoltan_Set_Coarse_Obj_List_Fn.
void *Get_Coarse_Obj_List_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_Coarse_Obj_List. Set in Zoltan_Set_Fn +or Zoltan_Set_Coarse_Obj_List_Fn.
ZOLTAN_FIRST_COARSE_OBJ_FN +* Get_First_Coarse_ObjA pointer to an application-registered function that returns the first +object of the initial coarse grid. Used with Get_Next_Coarse_Obj +as an iterator over all objects in the coarse grid. Set in Zoltan_Set_Fn +or Zoltan_Set_First_Coarse_Obj_Fn.
void *Get_First_Coarse_Obj_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_First_Coarse_Obj. Set in Zoltan_Set_Fn +or Zoltan_Set_First_Coarse_Obj_Fn.
ZOLTAN_NEXT_COARSE_OBJ_FN +* Get_Next_Coarse_ObjA pointer to an application-registered function that, given an object +in the initial coarse grid, returns the next object in the coarse grid. +Used with Get_First_Coarse_Obj as an iterator over all objects in +the coarse grid. Set in Zoltan_Set_Fn +or Zoltan_Set_Next_Coarse_Obj_Fn.
void *Get_Next_Coarse_Obj_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_Next_Coarse_Obj. Set in Zoltan_Set_Fn +or Zoltan_Set_Next_Coarse_Obj_Fn.
ZOLTAN_NUM_CHILD_FN +* Get_Num_ChildA pointer to an application-registered function that returns the number +of refinement children of an object. Set in Zoltan_Set_Fn +or Zoltan_Set_Num_Child_Fn.
void *Get_Num_Child_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_Num_Child. Set in Zoltan_Set_Fn +or Zoltan_Set_Num_Child_Fn.
ZOLTAN_CHILD_LIST_FN +* Get_Child_ListA pointer to an application-registered function that returns arrays +of objects that are refinement children of a given object. Set in Zoltan_Set_Fn +or Zoltan_Set_Child_List_Fn.
void *Get_Child_List_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_Child_List. Set in Zoltan_Set_Fn +or Zoltan_Set_Child_List_Fn.
ZOLTAN_CHILD_WEIGHT_FN +* Get_Child_WeightA pointer to an application-registered function that returns the weight +of an object. Set in Zoltan_Set_Fn +or Zoltan_Set_Child_Weight_Fn.
void *Get_Child_Weight_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_Child_Weight. Set in Zoltan_Set_Fn +or Zoltan_Set_Child_Weight_Fn.
ZOLTAN_OBJ_SIZE_FN +* Get_Obj_SizeA pointer to an application-registered function that returns the size +(in bytes) of data objects to be migrated. Called by Zoltan_Migrate.  +Set in Zoltan_Set_Fn +or Zoltan_Set_Obj_Size_Fn.
void *Get_Obj_Size_DataA pointer to data provided by the user that will be passed to the function +pointed to by Get_Obj_Size.  Set in Zoltan_Set_Fn +or Zoltan_Set_Obj_Size_Fn.
ZOLTAN_PACK_OBJ_FN +* Pack_ObjA pointer to an application-registered function that packs all data +for a given object into a communication buffer provided by the migration +tools in preparation for data-migration communication. Called by Zoltan_Migrate +for each object to be exported.  Set in Zoltan_Set_Fn +or Zoltan_Set_Pack_Obj_Fn.
void *Pack_Obj_DataA pointer to data provided by the user that will be passed to the function +pointed to by Pack_Obj. Set in Zoltan_Set_Fn +or Zoltan_Set_Pack_Obj_Fn.
ZOLTAN_UNPACK_OBJ_FN +* Unpack_ObjA pointer to an application-registered function that unpacks all data +for a given object from a communication buffer after the communication +for data migration is completed. Called by Zoltan_Migrate +for each imported object.  Set in Zoltan_Set_Fn +or Zoltan_Set_Unpack_Obj_Fn.
void *Unpack_Obj_DataA pointer to data provided by the user that will be passed to the function +pointed to by Unpack_Obj. Set in Zoltan_Set_Fn +or Zoltan_Set_Unpack_Obj_Fn.
ZOLTAN_LB LBA structure with data used by the load-balancing tools. See the table +below.
ZOLTAN_MIGRATE MigrateA structure with data used by the migration tools. See the table +below.
Fields of the Zoltan_Struct data structure.
+ +

Each Zoltan_Struct data structure has a ZOLTAN_LB sub-structure. +The ZOLTAN_LB structure contains data used by the load-balancing tools, +including pointers to specific load-balancing methods and load-balancing data +structures. +The fields of the ZOLTAN_LB structure are listed and described +in the following table. +
  +

Fields of ZOLTAN_LBDescription
void * Data_StructureThe data structure used by the selected load-balancing algorithm; this +pointer is cast by the algorithm to the appropriate data type.
double Imbalance_TolThe degree of load balance which is considered acceptable. +Set via a call to +Zoltan_Set_Param +for IMBALANCE_TOL.
int Num_Global_Parts +The total number of parts to be generated. +Set via a call to +Zoltan_Set_Param +for NUM_GLOBAL_PARTS or through +summation of +NUM_LOCAL_PARTS +parameters. +
int Num_Local_Parts +The number of parts to be generated on this processor. +Set via a call to +Zoltan_Set_Param +for NUM_LOCAL_PARTS or (roughly) through +division of the +NUM_GLOBAL_PARTS +parameter by the number of processors. +
int Return_ListsA flag indicating whether the application wants import and/or export +lists returned by Zoltan_LB_Partition. +Set via a call to +Zoltan_Set_Param +for RETURN_LISTS.
ZOLTAN_LB_METHOD MethodAn enumerated type designating which load-balancing algorithm should +be used with this Zoltan structure; +set via a call to +Zoltan_Set_Param +for LB_METHOD. +
LB_FN * LB_FnA pointer to the load-balancing function specified by Method. +
ZOLTAN_LB_FREE_DATA_FN *Free_Structure Pointer to a function that frees the Data_Structure memory. +
ZOLTAN_LB_POINT_ASSIGN_FN *Point_Assign Pointer to the function that performs +Zoltan_LB_Point_Assign for the particular load-balancing method. +
ZOLTAN_LB_BOX_ASSIGN_FN *Box_Assign Pointer to the function that performs +Zoltan_LB_Box_Assign for the particular load-balancing method. +
Fields of the ZOLTAN_LB data structure.
+ + +

Each Zoltan_Struct data structure has a ZOLTAN_MIGRATE sub-structure. +The ZOLTAN_MIGRATE structure contains data used by the migration tools, +including pointers to pre- and post-processing routines. These pointers +are set through the interface routine Zoltan_Set_Fn +and are used in Zoltan_Migrate. +The fields of the ZOLTAN_MIGRATE structure are listed and described +in the following table. +
  +

Fields of ZOLTAN_MIGRATEDescription
int Auto_MigrateA flag indicating whether Zoltan should perform +auto-migration for the application. If true, Zoltan +calls Zoltan_Migrate +to move objects to their new processors; if false, data migration is left +to the user. Set in Zoltan_Set_Param +for AUTO_MIGRATE.
ZOLTAN_PRE_MIGRATE_PP_FN +* Pre_Migrate_PPA pointer to an application-registered function that performs pre-processing +for data migration. The function is called by Zoltan_Migrate +before data migration is performed. Set in Zoltan_Set_Fn +or Zoltan_Set_Pre_Migrate_PP_Fn.
void *Pre_Migrate_PP_DataA pointer to data provided by the user that will be passed to the function +pointed to by Pre_Migrate_PP. Set in Zoltan_Set_Fn +or Zoltan_Set_Pre_Migrate_PP_Fn.
ZOLTAN_MID_MIGRATE_PP_FN +* Mid_Migrate_PPA pointer to an application-registered function that performs processing +between the packing and unpacking operations in Zoltan_Migrate. +Set in Zoltan_Set_Fn +or Zoltan_Set_Mid_Migrate_PP_Fn.
void *Mid_Migrate_PP_DataA pointer to data provided by the user that will be passed to the function +pointed to by Mid_Migrate_PP. Set in Zoltan_Set_Fn +or Zoltan_Set_Mid_Migrate_PP_Fn.
ZOLTAN_POST_MIGRATE_PP_FN +*Post_Migrate_PPA pointer to an application-registered function that performs post-processing +for data migration. The function is called by Zoltan_Migrate +after data migration is performed. Set in Zoltan_Set_Fn +or Zoltan_Set_Post_Migrate_PP_Fn.
void *Post_Migrate_PP_DataA pointer to data provided by the user that will be passed to the function +pointed to by Post_Migrate_PP.  Set in Zoltan_Set_Fn +or Zoltan_Set_Post_Migrate_PP_Fn
ZOLTAN_PRE_MIGRATE_FN +* Pre_MigrateA pointer to an application-registered function that performs pre-processing +for data migration. The function is called by Zoltan_Help_Migrate +before data migration is performed. Set in Zoltan_Set_Fn +or Zoltan_Set_Pre_Migrate_Fn. +Maintained for backward compatibility with Zoltan v1.3 interface. +
void *Pre_Migrate_DataA pointer to data provided by the user that will be passed to the function +pointed to by Pre_Migrate. Set in Zoltan_Set_Fn +or Zoltan_Set_Pre_Migrate_Fn.
ZOLTAN_MID_MIGRATE_FN +* Mid_MigrateA pointer to an application-registered function that performs processing +between the packing and unpacking operations in Zoltan_Help_Migrate. +Set in Zoltan_Set_Fn +or Zoltan_Set_Mid_Migrate_Fn. +Maintained for backward compatibility with Zoltan v1.3 interface. +
void *Mid_Migrate_DataA pointer to data provided by the user that will be passed to the function +pointed to by Mid_Migrate. Set in Zoltan_Set_Fn +or Zoltan_Set_Mid_Migrate_Fn.
ZOLTAN_POST_MIGRATE_FN +*Post_MigrateA pointer to an application-registered function that performs post-processing +for data migration. The function is called by Zoltan_Help_Migrate +after data migration is performed. Set in Zoltan_Set_Fn +or Zoltan_Set_Post_Migrate_Fn. +Maintained for backward compatibility with Zoltan v1.3 interface. +
void *Post_Migrate_DataA pointer to data provided by the user that will be passed to the function +pointed to by Post_Migrate.  Set in Zoltan_Set_Fn +or Zoltan_Set_Post_Migrate_Fn
Fields of the ZOLTAN_MIGRATE data structure.
+ +

For each pointer to an application registered function in the +Zoltan_Struct +and ZOLTAN_MIGRATE data structures there is also a pointer to a Fortran +application registered function, of the form ZOLTAN_FUNCNAME_FORT_FN *Get_Funcname_Fort. +These are for use within the Fortran interface. The Zoltan routines +should invoke the usual application registered function regardless of whether +the Zoltan structure was created from C or Fortran. + + + +


+
[Table of Contents  |  Next:  +Services  |  Previous:  +ID Data Types  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_lb_types.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_lb_types.html new file mode 100644 index 00000000..ade5ae6b --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_lb_types.html @@ -0,0 +1,249 @@ + + + + + + + + + Zoltan Developer's Guide: Data Types + + + + + + +

+ID Data Types

+Within Zoltan, objects are identified by a global +identification (ID) value provided by the application. This global ID must +be unique across all processors. The application may also provide a local +ID value that it can use for faster location of objects within its own +data structure. For example, local array indices to objects' data may be +provided as the local IDs; these indices can then be used to directly access +data in the query functions. Zoltan does not use these +local IDs, but since it must pass them to the application in the interface +query functions, it must store them with the objects' data.  ID data +types and macros for manipulating IDs are described below. +
IDs and Arrays of IDs +
Allocating IDs +
Common Operations on IDs
+ +

+IDs and Arrays of IDs

+Zoltan stores each global and local ID as an array of unsigned integers. +Arrays of IDs are passed to the application as a one-dimensional array +of unsigned integers with size +number_of_IDs * number_of_entries_per_ID. +A type definition ZOLTAN_ID_PTR +(in include/zoltan_types.h) points to an ID or array of IDs. The number +of array entries per ID can be set by the application using the NUM_GID_ENTRIES +and +NUM_LID_ENTRIES +parameters. +
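As a small illustration (not taken from the Zoltan sources), the fragment below walks an ID array laid out as described above. The typedefs mirror those in include/zoltan_types.h with the default unsigned int ID type; num_gid_entries is a hypothetical variable holding the value of NUM_GID_ENTRIES.

    #include <stdio.h>

    typedef unsigned int ZOLTAN_ID_TYPE;    /* default ID word type                */
    typedef ZOLTAN_ID_TYPE *ZOLTAN_ID_PTR;  /* pointer to an ID or array of IDs    */

    /* Print each global ID in a flat array of num_obj IDs; the i-th ID
     * starts at offset i*num_gid_entries, as described above.                     */
    void print_gids(ZOLTAN_ID_PTR gids, int num_obj, int num_gid_entries)
    {
      int i, k;
      for (i = 0; i < num_obj; i++) {
        ZOLTAN_ID_PTR gid = &gids[i * num_gid_entries];
        for (k = 0; k < num_gid_entries; k++)
          printf("%u ", gid[k]);
        printf("\n");
      }
    }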

+Allocating IDs

+Macros that simplify the allocation of global and local IDs are described +in the table below. These macros provide consistent, easy-to-use memory +allocation with error checking and, thus, their use is highly recommended.  +Each macro returns NULL if either a memory error occurs or the number of +IDs requested is zero. +
  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ZOLTAN_ID_PTR +ZOLTAN_MALLOC_GID(struct Zoltan_Struct +*zz);Allocates and returns a pointer to a single global ID.
ZOLTAN_ID_PTR +ZOLTAN_MALLOC_LID(struct Zoltan_Struct +*zz);Allocates and returns a pointer to a single local ID.
ZOLTAN_ID_PTR +ZOLTAN_MALLOC_GID_ARRAY(struct Zoltan_Struct +*zz, int n);Allocates and returns a pointer to an array of n global IDs, +where the index into the array for the ith global ID +is i*NUM_GID_ENTRIES.
ZOLTAN_ID_PTR +ZOLTAN_MALLOC_LID_ARRAY(struct Zoltan_Struct +*zz, int n);Allocates and returns a pointer to an array of n local IDs, +where the index into the array for the ith local ID is +i*NUM_LID_ENTRIES.
ZOLTAN_ID_PTR +ZOLTAN_REALLOC_GID_ARRAY(struct Zoltan_Struct +*zz, ZOLTAN_ID_PTR +ptr, int n);Reallocates and returns a pointer to an array of n global IDs, +replacing the current array pointed to by ptr.
ZOLTAN_ID_PTR +ZOLTAN_REALLOC_LID_ARRAY(struct Zoltan_Struct +*zz, ZOLTAN_ID_PTR +ptr, int n);Reallocates and returns a pointer to an array of n local IDs, +replacing the current array pointed to by ptr.
+ +
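The following fragment is a hedged sketch (not from the Zoltan sources) of how an algorithm might use these allocation macros. The object count num_export is hypothetical, ZOLTAN_MEMERR is used as the usual Zoltan memory-error return code, and the deallocation call ZOLTAN_FREE from Zoltan's memory utilities is an assumption.

    /* Allocate an array of num_export global IDs, check the result, and
     * release it when done.  zz is the Zoltan structure passed into the
     * algorithm.                                                          */
    ZOLTAN_ID_PTR export_gids = ZOLTAN_MALLOC_GID_ARRAY(zz, num_export);
    if (export_gids == NULL && num_export > 0) {
      return ZOLTAN_MEMERR;                 /* allocation failed           */
    }
    /* ... fill the i-th ID starting at offset i * (number of GID entries) ... */
    ZOLTAN_FREE(&export_gids);              /* assumed deallocation call   */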

+Common Operations on IDs

+In addition, macros are defined for common operations on global and local +IDs.  These macros include error checking when appropriate and account +for different values of NUM_GID_ENTRIES +and NUM_LID_ENTRIES.  +Use of these macros improves code robustness and simplifies code maintenance; +their use is highly recommended. +
  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
void ZOLTAN_INIT_GID(struct Zoltan_Struct +*zz, ZOLTAN_ID_PTR id);Initializes all entries of the global ID id to zero; id  +must be allocated before calling ZOLTAN_INIT_GID.
void ZOLTAN_INIT_LID(struct Zoltan_Struct +*zz, ZOLTAN_ID_PTR id);Initializes all entries of the local ID id to zero; id  +must be allocated before calling ZOLTAN_INIT_LID.
void ZOLTAN_SET_GID(struct Zoltan_Struct +*zz, ZOLTAN_ID_PTR tgt, +ZOLTAN_ID_PTR +src);Copies the global ID src into the global ID tgt.  +Both src and tgt must be allocated before calling ZOLTAN_SET_LID.
void ZOLTAN_SET_LID(struct Zoltan_Struct +*zz, ZOLTAN_ID_PTR tgt, +ZOLTAN_ID_PTR +src);Copies the local ID src into the local ID tgt.  +Both src and tgt must be allocated before calling ZOLTAN_SET_LID.
int ZOLTAN_EQ_GID(struct Zoltan_Struct +*zz, ZOLTAN_ID_PTR a, +ZOLTAN_ID_PTR +b);Returns TRUE if global ID a is equal to global ID b.
void ZOLTAN_PRINT_GID(struct Zoltan_Struct +*zz, ZOLTAN_ID_PTR id);Prints all entries of a single global ID id.
void ZOLTAN_PRINT_LID(struct Zoltan_Struct +*zz, ZOLTAN_ID_PTR id);Prints all entries of a single local ID id.
+ +
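A short hedged sketch (not from the Zoltan sources) combining the macros above; original_gid and target_gid are hypothetical IDs already held by the algorithm.

    /* Make a zeroed copy slot, copy one ID into it, then compare and print. */
    ZOLTAN_ID_PTR copy = ZOLTAN_MALLOC_GID(zz);
    ZOLTAN_INIT_GID(zz, copy);               /* zero all entries            */
    ZOLTAN_SET_GID(zz, copy, original_gid);  /* copy <- original_gid        */
    if (ZOLTAN_EQ_GID(zz, copy, target_gid))
      ZOLTAN_PRINT_GID(zz, copy);            /* print the matching ID       */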

+


+
[Table of Contents  |  Next:  +Data Structures  |  Previous:  +Interface Functions  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_mig.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_mig.html new file mode 100644 index 00000000..971ada41 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_mig.html @@ -0,0 +1,78 @@ + + + + + + + + + Zoltan Developer's Guide: Migration Tools + + + + + + +

+Migration Tools

+The migration tools in the Zoltan library perform communication necessary +for data migration in the application. The routine Zoltan_Migrate +calls application-registered packing routines to gather data to be sent +to other processors. It sends the data using the unstructured +communication package. It then calls application-registered unpacking +routines for each imported object to add received data to the processor's +data structures. See the Zoltan User's Guide +for more details on the use of and interface to the migration tools. +

In future releases, the migration tools will be updated to use +MPI data types to support heterogeneous computing architectures. +
  +

+


+
[Table of Contents  |  Next:  +FORTRAN Interface  |  Previous:  +Part Remapping  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_parmetis.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_parmetis.html new file mode 100644 index 00000000..6be236b6 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_parmetis.html @@ -0,0 +1,176 @@ + + + + + + + + + Zoltan Developer's Guide: ParMETIS/Jostle + + + + + + +

+Appendix: ParMETIS +and Jostle

+ +

+Overview of structure (algorithm)

+This part of Zoltan provides an interface to various graph-based load-balancing +algorithms. Currently two libraries are supported: ParMETIS +and +Jostle. +Each of these libraries contains several algorithms. +

+Interface algorithm

+The structure of the code is as follows: Each package +(ParMETIS, +Jostle) +has its own wrapper routine that performs initialization and sets parameters. +The main routine is Zoltan_ParMetis_Jostle, which constructs an appropriate +graph data structure using Zoltan's query functions. After the graph structure +has been constructed, the appropriate library is called and the import/export +list is created and returned. +

Please note that ParMETIS +and Jostle are not integral parts +of Zoltan. These libraries must be obtained and installed separately. +(ParMETIS +may be bundled with Zoltan, but it is an independent package developed +at Univ. of Minnesota.) Zoltan merely provides an interface to these libraries. +

The most complex task in the interface code is the construction of the +graph data structure. This structure is described in the next section. +The routine uses the Zoltan query functions to get a list of objects and +edges on each processor. Each object has a unique global ID which is mapped +into a unique (global) number between 1 and n, where n is +the total number of objects. The construction of the local (on-processor) +part of the graph is straightforward. When an edge goes between objects +that reside on different processors, global communication is required. +We use Zoltan's unstructured communication library for this. A hash function +(Zoltan_Hash) is used to efficiently map global IDs to integers. +The graph construction algorithm has parallel complexity +O(maxj {nj+mj+p}), where +nj is the number of objects on processor j, +mj is the number of edges on processor j, and +p is the number of processors. +

One other feature of the interface code should be mentioned.  While +Zoltan allows objects and edges to have real (float) weights, both ParMETIS and Jostle +currently require integer weights. Therefore, Zoltan first checks if the +object weights are integers. If not, the weights are automatically scaled +and rounded to integers. The scaling is performed such that the weights +become large integers, subject to the constraint that the sum of (any component +of) the weights is less than a large constant MAX_WGT_SUM < INT_MAX. +The scaled weights are rounded up to the nearest integer to ensure that +nonzero weights never become zero. +Note that for multidimensional weights, each weight component is scaled independently. +(The source code is written such that this scaling is simple to change.) +
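The fragment below is only a sketch of the scaling idea just described, not the actual Zoltan code; fwgts, iwgts, num_obj, and comm are hypothetical, MAX_WGT_SUM is the constant mentioned above, and math.h and mpi.h are assumed to be included. It scales one weight component so its global sum stays below MAX_WGT_SUM and rounds nonzero weights up so they cannot become zero.

    /* fwgts[]: input float weights; iwgts[]: output integer weights.      */
    float local_sum = 0.0f, global_sum, scale;
    int i;
    for (i = 0; i < num_obj; i++)
      local_sum += fwgts[i];
    MPI_Allreduce(&local_sum, &global_sum, 1, MPI_FLOAT, MPI_SUM, comm);

    scale = (global_sum > 0.0f) ? (float)(MAX_WGT_SUM / global_sum) : 1.0f;
    for (i = 0; i < num_obj; i++)
      iwgts[i] = (fwgts[i] > 0.0f) ? (int) ceil(fwgts[i] * scale) : 0;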

Currently Zoltan constructs and discards the entire graph structure +every time a graph-based method (ParMETIS or Jostle) is called. Incremental +update of the graph structure may be supported in the future. +

The graph construction code in Zoltan_ParMetis_Jostle can also be +used to interface with other graph-based algorithms. +

+Algorithms used in ParMETIS and Jostle libraries

+There are two main types of algorithms used in ParMETIS and Jostle. The +first is multilevel graph partitioning. The main idea is to take a large +graph and construct a sequence of smaller and simpler graphs that +in some sense approximate the original graph. When the graph is sufficiently +small it is partitioned using some other method. This smallest graph and +the corresponding partition are then propagated back through all the levels +to the original graph. A popular local refinement strategy known as Kernighan-Lin +is employed at some or every level. +

The second main strategy is diffusion. This method assumes that an initial +partition (balance) is given, and load balance is achieved by repeatedly +moving objects (nodes) from overloaded parts (processors) to neighboring parts (processors) with too little load. +

For further details about the algorithms in a specific library, please +refer to the documentation that is distributed with that library. +

+Data structures

+We use the ParMETIS parallel graph structure. This is implemented using +5 arrays: +
    +
  1. vtxdist: gives the distribution of the objects (vertices) to processors
  2. xadj: indices (pointers) to the adjncy array
  3. adjncy: neighbor lists
  4. adjwgt: edge weights
  5. vwgt: vertex (object) weights
+The vtxdist array is duplicated on all processors, while the other +arrays are local. +
For more details, see the ParMETIS User's Guide. +
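As a concrete (hypothetical) illustration of these arrays, consider a path graph with four vertices 0-1-2-3 distributed over two processors, two vertices each. 0-based numbering and the idxtype integer type of the ParMETIS 3.x C interface are assumed here.

    /* Duplicated on both processors: processor p owns vertices
     * vtxdist[p] .. vtxdist[p+1]-1.                                        */
    idxtype vtxdist[] = {0, 2, 4};

    /* Processor 0 owns vertices 0 and 1; their neighbor lists are {1} and
     * {0,2}.  Edges of local vertex v are adjncy[xadj[v] .. xadj[v+1]-1].  */
    idxtype xadj_p0[]   = {0, 1, 3};
    idxtype adjncy_p0[] = {1, 0, 2};

    /* Processor 1 owns vertices 2 and 3; neighbor lists are {1,3} and {2}. */
    idxtype xadj_p1[]   = {0, 2, 3};
    idxtype adjncy_p1[] = {1, 3, 2};

    /* adjwgt (if used) is parallel to adjncy; vwgt is parallel to the
     * local vertices.                                                      */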

+Parameters

+Zoltan supports the most common parameters in ParMETIS and Jostle. These +parameters are parsed in the package-specific wrapper routine (Zoltan_ParMetis +or Zoltan_Jostle) and later passed on to the desired library via Zoltan_ParMetis_Jostle. +

In addition, Zoltan has one graph parameter of its own: CHECK_GRAPH. +This parameter is set in Zoltan_ParMetis_Jostle and specifies the amount +of verification that is performed on the constructed graph. For example, it +is required that the graph is symmetric and that the weights are non-negative. +

+Main routine

+The main routine is Zoltan_ParMetis_Jostle but it should always be accessed +through either Zoltan_ParMetis or Zoltan_Jostle. +

+


+
[Table of Contents  |  Next:  +Hypergraph Partitioning  |  Previous:  +Recursive Inertial Bisection (RIB)  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_phg.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_phg.html new file mode 100644 index 00000000..9fb62145 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_phg.html @@ -0,0 +1,504 @@ + + + + + + + + + Zoltan Developer's Guide: Hypergraph Partitioning + + + +

+Appendix: Hypergraph Partitioning

+Hypergraph partitioning is a useful partitioning and +load balancing method when connectivity data is available. It can be +viewed as a more sophisticated alternative to +the traditional graph partitioning. +

A hypergraph consists of vertices and hyperedges. A hyperedge +connects +one or more vertices. A graph is a special case of a hypergraph where +each edge has size two (two vertices). The hypergraph model is well +suited to parallel computing, where vertices correspond to data objects +and hyperedges represent the communication requirements. The basic +partitioning problem is to partition the vertices into k +approximately equal sets such that the number of cut hyperedges is +minimized. Most partitioners (including Zoltan-PHG) allow a more +general +model where both vertices and hyperedges can be assigned weights. +It has been +shown that the hypergraph model gives a more accurate representation +of communication cost (volume) than the graph model. In particular, +for sparse matrix-vector multiplication, the hypergraph model +exactly represents communication volume. Sparse +matrices can be partitioned either along rows or columns; +in the row-net model the columns are vertices and each row corresponds +to a hyperedge, while in the column-net model the roles of vertices +and hyperedges are reversed.

+

Zoltan contains a native parallel hypergraph partitioner, called PHG +(Parallel HyperGraph partitioner). In addition, Zoltan provides +access to PaToH, +a serial hypergraph partitioner. +Note that PaToH is not part of Zoltan and should be obtained +separately from the +PaToH web site. +Zoltan-PHG is a fully parallel multilevel hypergraph partitioner. For +further technical description, see [Devine et al, 2006].
+

+

Algorithm:

+The algorithm used is multilevel hypergraph partitioning. For +coarsening, several versions of inner product (heavy connectivity) +matching are available. +The refinement is based on Fiduccia-Mattheyses (FM) but in parallel it +is only an approximation. + 

Parallel implementation:

+A novel feature of our parallel implementation is that we use a 2D +distribution of the hypergraph. That is, each processor owns partial +data about some vertices and some hyperedges. The processors are +logically organized in a 2D grid as well. Most communication is limited +to either a processor row or column. This design should allow for +good scalability on large numbers of processors.
+ +

Data structures:

+The hypergraph is the most important data structure. This is stored as +a compressed sparse matrix. Note that in parallel, each processor owns +a local part of the global hypergraph +(a submatrix of the whole matrix). +The hypergraph data type is struct HGraph, and contains +information like number of vertices, hyperedges, pins, compressed +storage of all pins, optional vertex and edge weights, pointers +to relevant communicators, and more. One cryptic notation needs an +explanation: The arrays hindex, hvertex are used to +look up vertex info given a hyperedge, and vindex, vedge are +used to look up hyperedge info given a vertex. Essentially, +we store the hypergraph as a sparse matrix in both CSR and CSC formats. +This doubles the memory cost but gives better performance. +The data on each processor is stored using local indexing, starting at zero. +In order to get the global vertex or edge number, use the macros +VTX_LNO_TO_GNO and EDGE_LNO_TO_GNO. These macros will +look up the correct offsets (using the dist_x and dist_y arrays). +Note that phg->nVtx is always the local number of vertices, +which may be zero on some processors. + +
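A tiny hypothetical example (not from the sources) of the dual storage described above: a local hypergraph with three vertices and two hyperedges, where hyperedge 0 = {0,1} and hyperedge 1 = {0,1,2}, giving five pins.

    /* Edge-oriented view (CSR): the pins of hyperedge e are
     * hvertex[hindex[e] .. hindex[e+1]-1].                                 */
    int hindex[]  = {0, 2, 5};
    int hvertex[] = {0, 1,  0, 1, 2};

    /* Vertex-oriented view (CSC): the hyperedges containing vertex v are
     * vedge[vindex[v] .. vindex[v+1]-1].                                   */
    int vindex[]  = {0, 2, 4, 5};
    int vedge[]   = {0, 1,  0, 1,  1};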

Parameters:

+In the User's Guide, only the most essential parameters have been +documented. There are several other parameters, intended for developers +and perhaps expert "power" users. We give a more complete list of all +parameters below. Note that these parameters may change in future versions!
+
+For a precise list of parameters in a particular version of Zoltan, look at the source code (phg.c).
Method String:HYPERGRAPH
Parameters:
+
    HYPERGRAPH_PACKAGE
+
PHG (parallel) or PaToH (serial)
+
   CHECK_HYPERGRAPH
+
Check if input data is valid. +(Slows performance; intended for debugging.)
+
    +PHG_OUTPUT_LEVEL
+
Level of verbosity; 0 is silent.
+
    PHG_FINAL_OUTPUT
+
Print stats about final +partition? (0/1)
+
    PHG_NPROC_VERTEX
+
Desired number of processes in +the vertex direction (for 2D internal layout)
    PHG_NPROC_HEDGE
+
Desired number of processes in +the hyperedge direction (for 2D internal layout)
    PHG_COARSENING_METHODThe method to use in matching/coarsening; currently these are +available. 
+ agg - agglomerative inner product +matching (a.k.a. heavy connectivity matching)
+ ipm - inner product +matching (a.k.a. heavy connectivity matching)
+ c-ipm -  column +ipm;  faster method based on ipm within processor columns
+ a-ipm - alternate +between fast method (l-ipm ) and ipm
+ l-ipm -  local ipm +on each processor. Fastest option  but often gives poor quality.
+ h-ipm - hybrid ipm that  uses partial c-ipm followed +by ipm on each level
+
+
    PHG_COARSENING_LIMIT
+
Number of vertices at which to stop coarsening.
+
    PHG_VERTEX_VISIT_ORDER
+
Ordering of vertices in greedy +matching scheme:
+0 - random
+1 - natural order (as given by the query functions)
+2 - increasing vertex weights
+3 - increasing vertex degree
+4 - increasing vertex degree, weighted by pins
+
    PHG_EDGE_SCALING
+
Scale edge weights by some +function of size of the hyperedges:
+0 - no scaling
+1 - scale by 1/(size-1)     [absorption scaling]
+2 - scale by 2/((size*size-1)) [clique scaling]
+
    PHG_VERTEX_SCALING
+
Variations in "inner product" +similarity metric (for matching):
+0 - Euclidean inner product: <x,y>
+1 - cosine similarity: <x,y>/(|x|*|y|)
+2 - <x,y>/(|x|^2 * |y|^2)
+3 - scale by sqrt of vertex weights
+4 - scale by vertex weights
+
    PHG_COARSEPARTITION_METHODMethod to partition the coarsest (smallest) hypergraph; +typically done in serial:
+ random - random
+ linear - linear +(natural) order
+ greedy - greedy method +based on minimizing cuts
+ auto - automatically +select from the above methods (in parallel, the processes will do +different methods)
+
    PHG_REFINEMENT_METHOD
+
Refinement algorithm:
fm - two-way +approximate  FM
+ none - no refinement
+
    PHG_REFINEMENT_LOOP_LIMITLoop limit in FM refinement. Higher number means more +refinement.
+
    PHG_REFINEMENT_MAX_NEG_MOVE
+
Maximum number of negative moves allowed in FM.
+
   PHG_BAL_TOL_ADJUSTMENT
+
Controls how the balance tolerance is adjusted at +each level of bisection.
+
  PHG_RANDOMIZE_INPUT
+
Randomize layout of vertices and +hyperedges in internal parallel 2D layout? (0/1)
+
  PHG_EDGE_WEIGHT_OPERATION + Operation to be applied to edge +weights supplied by different processes for the same hyperedge:
+ add - the hyperedge weight will be the sum of the supplied +weights
+ max - the hyperedge weight will be the maximum of the +supplied weights
+ error - if the hyperedge weights are not equal, Zoltan +will flag an error, otherwise the hyperedge weight will be the value +returned by the processes
+
   EDGE_SIZE_THRESHOLD
+
Ignore hyperedges greater than this fraction times +number of vertices.
+
   PATOH_ALLOC_POOL0
+
Memory allocation for PaToH; see +the PaToH manual for details.
+
   PATOH_ALLOC_POOL1
+
Memory allocation for PaToH; see +the PaToH manual for details.
Default values:
+

+
HYPERGRAPH_PACKAGE = PHG
+

+
CHECK_HYPERGRAPH += 0
+

+
PHG_OUTPUT_LEVEL=0

+
PHG_FINAL_OUTPUT=0

+
PHG_COARSENING_METHOD=ipm

+
PHG_COARSENING_LIMIT=100

+
PHG_VERTEX_VISIT_ORDER=0

+
PHG_EDGE_SCALING=0

+
PHG_VERTEX_SCALING=0

+
PHG_COARSEPARTITION_METHOD=greedy

+
PHG_REFINEMENT_METHOD=fm

+
PHG_REFINEMENT_LOOP_LIMIT=10

+
PHG_REFINEMENT_MAX_NEG_MOVE=100

+
PHG_BAL_TOL_ADJUSTMENT=0.7

+
PHG_RANDOMIZE_INPUT=0

+
PHG_EDGE_WEIGHT_OPERATION=max

+
EDGE_SIZE_THRESHOLD=0.25

+
PATOH_ALLOC_POOL0=0

+
PATOH_ALLOC_POOL1=0
Required Query Functions:
+

+
ZOLTAN_NUM_OBJ_FN

+
ZOLTAN_OBJ_LIST_FN +or ZOLTAN_FIRST_OBJ_FN/ZOLTAN_NEXT_OBJ_FN +pair

+
ZOLTAN_HG_SIZE_CS_FN +
+ ZOLTAN_HG_CS_FN +
Optional Query Functions:
+

+
ZOLTAN_HG_SIZE_EDGE_WTS_FN

+
ZOLTAN_HG_EDGE_WTS_FN
+

+It is possible to provide the graph query functions instead of the +hypergraph queries, though this is not recommended. If only graph query +functions are registered, Zoltan will automatically create a hypergraph +from the graph, but some information (specifically, edge weights) will +be lost.

+
[Table of Contents  | Next:  +Refinement Tree Partitioning  |  Previous:  +ParMetis  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_rcb.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_rcb.html new file mode 100644 index 00000000..fbd04e0a --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_rcb.html @@ -0,0 +1,207 @@ + + + + + + + + + Zoltan Developer's Guide: RCB + + + + + + +

+Appendix: Recursive Coordinate Bisection (RCB)

+  + +

+Outline of Algorithm

+ +

The implementation of Recursive Coordinate Bisection (RCB) in Zoltan +is due to Steve Plimpton of Sandia National Laboratories and was modified +by Matt St. John and Courtenay Vaughan. In this implementation of RCB, the +parallel computer is first divided into two pieces and then the computational +domain is divided into two pieces such that the proportion of work in each +piece is the same as the proportion of computational power. The division +of the parallel machine is done by a subroutine which is part of the support +for heterogeneous architectures that is being built into the Zoltan library. +This process is repeated recursively on each subdomain and its associated +part of the computer. Each of these divisions is done with a cutting plane +that is orthogonal to one of the coordinate axes. + 

At each of these stages, each subdomain of processors and the objects +that are contained on those processors are divided into two sets based +on which side of the cutting plane each object is on. Either or both of +these sets may be empty. On each processor, the set of objects which are +on the same side of the cut as the processor is retained by the processor, +while the other objects are sent to processors on the other side of the +cut. In order to minimize the maximum memory usage in each set of processors, +the objects that are being sent to each set of processors are distributed +such that each processor in a set has about the same number of objects +after the objects from the other set of processors are sent. If a processor will retain more objects than the average +number of objects held by the rest of the processors in its set, +that processor will not receive any objects. Thus each processor may send +and receive objects from several (or no) processors in the other set. Which outgoing objects are sent to which processors +is determined in the subroutine Zoltan_Create_Proc_List. Once this new +distribution of objects is determined, the +unstructured communication package in +Zoltan is used to determine which processors are going to receive which +objects and actually move the objects. + 

For applications that wish to add more objects to the decomposition +at a later time (e.g., through Zoltan_LB_Box_Assign or Zoltan_LB_Point_Assign), information to do this can be retained during the +decomposition phase. This information is kept if the parameter KEEP_CUTS +is set during the decomposition (see the RCB section in the +Zoltan User's Guide). +This information about the decomposition can be thought of as a tree with +the nodes which have children representing the cut information and the nodes +with no children representing processors. An object is dropped through the +tree starting with the root node and uses the cut information at each node it +encounters to determine which subtree it traverses. When it reaches a terminal +node, the node contains the processor number that the object belongs to. +The information to construct the tree is saved during the decomposition. +At each step in the decomposition, when each set is divided into two sets, +the set with the lowest numbered processor is designated to be the left set +and the information about the cut is stored in the lowest numbered processor +in the other set of processors which is the right set. As a result of this +process, each processor will store information for, at most, one cut, since +once a processor stores information about a cut, by being the lowest numbered +processor in the right set, it will always be in a left set after each +subsequent cut since it will be the lowest numbered processor in the set +being cut and the set it is put into will be the left set. The processor +which stores the cut information also stores the root node as its parent. +After the end of the division process, all of the information is collected +onto all of the processors. The parent information is then used to establish +the leaf information for the parent. When this information is gathered, the +tree structure is stored in arrays with the array position determined by the +processor number that was storing the information. There is an array which +stores the position of the cut information for the left set and one for the +right set as well as arrays for the cut information. Given that the lowest +numbered processor after a cut is in the left set, the cut information is +stored in the right set, and there is one fewer cut than the total number of +processors, processor 0 has no cut information, so the 0 position of the right +set array is empty and is used to store the position in the array that the +first cut is stored. When this information is used to process an object, +array position 0 in the right set array is used to determine the array +position of the first cut. From there, which side of the cut the object is +on is determined and that information is used to determine which cut to test +the object against next. This process is repeated recursively until a +terminal node is encountered which contains the processor number that the +object belongs to. +
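The loop below sketches the point-drop just described. It is not the Zoltan_LB_Point_Assign source; the treept array name, the field names, and the encoding of terminal nodes as negative values are assumptions made only for illustration.

    /* treept[] is the gathered array of cut records, indexed as described
     * above; position 0 of the right-set array holds the first cut.        */
    int node = treept[0].right_leaf;          /* index of the first cut     */
    while (node > 0) {
      if (coords[treept[node].dim] <= treept[node].cut)
        node = treept[node].left_leaf;        /* object on the low side     */
      else
        node = treept[node].right_leaf;       /* object on the high side    */
    }
    proc = -node;                             /* terminal: owning processor */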

+When the parameter RCB_REUSE is +specified, the RCB algorithm attempts to use information from a previous +RCB decomposition to generate an "initial guess" at the new decomposition. +For problems that change little between invocations of RCB, using RCB_REUSE +can reduce the amount of data movement in RCB, improving the performance +of the algorithm. When RCB_REUSE is true, the coordinates of all objects obtained through query functions are passed through +Zoltan_LB_Point_Assign +to determine their processor assignment in the previous RCB decomposition. +The information for the objects is then sent to the new processor assignments +using the unstructured communication utilities +to generate an initial condition matching the output of the previous RCB +decomposition. +The normal RCB algorithm is then applied to this new initial condition. + 
  + +

+Data Structure Definitions

+ +

There are three major data structures in RCB and they are defined in +rcb/rcb.h and rcb/shared.h. The points which are being load balanced are represented as a +structure Dot_Struct which contains the location of the point, its weight, and +its originating processor number. The nodes on the decomposition tree are +represented by the structure rcb_tree which contains the position of the cut, +the dimension that the cut is perpendicular to, and the node's parent and two +children (if they exist) in the tree. The structure RCB_Struct is the RCB data +structure which holds pointers to all of the other data structures needed for +RCB. It contains an array of Dot_Struct to represent the points being load +balanced, global and local IDs for the points, and an array of rcb_tree (whose length is the number of processors) +which contains the decomposition tree. + +
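Hedged sketches of the shapes just described; the real definitions in rcb/shared.h and rcb/rcb.h carry additional fields, so these declarations are illustrative only.

    struct Dot_Struct {          /* one point being balanced               */
      double X[3];               /* coordinates of the point               */
      float  Weight;             /* weight of the point                    */
      int    Proc;               /* originating processor                  */
    };

    struct rcb_tree {            /* one node of the decomposition tree     */
      double cut;                /* position of the cutting plane          */
      int    dim;                /* coordinate axis the cut is normal to   */
      int    parent;             /* parent node                            */
      int    left_leaf;          /* left child, if it exists               */
      int    right_leaf;         /* right child, if it exists              */
    };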
  + +

+Parameters

+ +

The parameters used by RCB and their default values are described in the +RCB section of the Zoltan User's +Guide. These can be set by use of the Zoltan_RCB_Set_Param subroutine +in the file rcb/rcb.c. + +

+When the parameter REDUCE_DIMENSIONS +is specified, the RCB algorithm will perform lower dimensional +partitioning if the geometry is found to be degenerate. More information +on detecting degenerate +geometries may be found in another +section. + +
  + +

+Main Routine

+ +

The main routine for RCB is Zoltan_RCB in the file rcb/rcb.c. + +
  +
  +
  + +

+


+
[Table of +Contents  |  Next:  +Recursive Inertial Bisection (RIB)  |  +Previous:  Using the Test Script  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_refs.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_refs.html new file mode 100644 index 00000000..654f0d41 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_refs.html @@ -0,0 +1,108 @@ + + + + + + + + + Zoltan Developer's Guide: References + + + + + + + +

+References

+ +
    +
  1. M. J. Berger and S. H. Bokhari. "A partitioning strategy for nonuniform problems on multiprocessors." IEEE Trans. Computers, C-36 (1987), 570-580.
  2. K. Devine, B. Hendrickson, M. St. John, E. Boman, and C. Vaughan. "Zoltan: A Dynamic Load-Balancing Library for Parallel Applications, User's Guide." Sandia National Laboratories Tech. Rep. SAND99-1377, Albuquerque, NM, 1999.
  3. H. C. Edwards. A Parallel Infrastructure For Scalable Adaptive Finite Element Methods and Its Application To Least Squares C^(inf) Collocation. Ph.D. Dissertation, University of Texas at Austin, May 1997.
  4. B. Hendrickson and K. Devine. "Dynamic Load Balancing in Computational Mechanics." Comp. Meth. Appl. Mech. Engrg., 184 (2000), 484-500.
  5. B. Hendrickson and R. Leland. "The Chaco User's Guide, Version 2.0." Sandia National Laboratories Tech. Rep. SAND94-2692, Albuquerque, NM, 1994. http://cs.sandia.gov/CRF/chac.html
  6. G. Karypis and V. Kumar. "ParMETIS: Parallel graph partitioning and sparse matrix ordering library." Tech. Rep. 97-060, Dept. of Computer Science, Univ. of Minnesota, 1997. https://www-users.cs.umn.edu/~karypis/metis/parmetis/
  7. C. Walshaw. "Parallel Jostle Library Interface: Version 1.1.7." Tech. Rep., Univ. of Greenwich, London, 1995. https://www.gre.ac.uk/jostle
+ +
+
[Table of Contents  |  Next:  +Using Test Driver  |  Previous:  +C++ Interface  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_reftree.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_reftree.html new file mode 100644 index 00000000..32cbc873 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_reftree.html @@ -0,0 +1,163 @@ + + + + + + + + + Zoltan Developer's Guide: Refinement Tree + + + + + + + +

+Appendix: Refinement Tree

+ +

+Overview of structure (algorithm)

+The refinement tree based partitioning algorithm was developed and implemented +by William Mitchell of the National Institute of Standards and Technology. +It is similar to the Octree method except that it uses a tree representation +of the refinement history instead of a geometry based octree. The method +generates a space filling curve which is cut into K appropriately-sized pieces +to define contiguous parts, where the size of a piece is the sum of the +weights of the elements in that piece. K, the number of parts, is not +necessarily equal to P, the number of processors. It is an appropriate load balancing +method for grids that are generated by adaptive refinement when the refinement +history is available. This implementation consists of two phases: the +construction of the refinement tree, and the definition of the parts. +

+Refinement Tree Construction

+The refinement tree consists of a root node and one node for each element +in the refinement history. The children of the root node are the elements +of the initial coarse grid. The children of all other nodes are the elements +that were formed when the parent element was refined. Upon first invocation, +the refinement tree is initialized. This creates the root node and initializes +a hash table that maps global IDs into nodes of the refinement tree. +It also queries the user for the elements of the initial grid and creates +the children of the root node. Unless the user provides the order +through which to traverse the elements of the initial grid, a path is +determined through the initial elements along with the "in" vertex and +"out" vertex of each element, i.e., the vertices through which the path +passes to move from one element to the next. +This path can be determined by a Hilbert space filling curve, Sierpinski +space filling curve (triangles only), or an algorithm that attempts to make +connected parts (connectivity is guaranteed for triangles and +tetrahedra). +The refinement tree is required to have all initial coarse grid elements, +not just those that reside on the processor. However, this requirement is not +imposed on the user; a communication step fills in the elements from other +processors. This much of the tree persists throughout execution of the +program. The remainder of the tree is reconstructed on each invocation of +the refinement tree partitioner. The remainder of the tree is built through +a tree traversal. At each node, the user is queried for the children of the +corresponding element. If there are no children, the user is queried for +the weight of the element. If there are children, the order of the children +is determined such that a tree traversal produces a space filling curve. +The user indicates what type of refinement was used to produce the children +(bisection of triangles, quadrasection of quadrilaterals, etc.). For each +supported type of refinement, a template based ordering is imposed. The +template also maintains an "in" and "out" vertex for each element +which are used by the template to determine the beginning and end of the space +filling curve through the children. If the +refinement is not among the types supported by templates, an exhaustive +search is performed to find an appropriate order, unless the user provides +the order. +

+Partition algorithm

+The algorithm that determines the parts uses four traversals of the +refinement tree. The first two traversals sum the weights in the tree. +In the first traversal, each node gets the sum of the weights of all the +descendant nodes that are assigned to this processor. The processors then +exchange information to fill in the partial sums for the leaf elements +that are not owned by this processor. (Note that an unowned leaf on one +processor may be the root of a large subtree on another processor.) +The second traversal completes the summation of the weights. The root +now has the sum of all the weights, which, in conjunction with an array +of relative part sizes, determines the desired weight of each part. +Currently the array of part sizes are all equal, but in the future +the array will be input to reflect heterogeneity in the system. The third +traversal computes the partition by adding subtrees to a part +until the size of the part meets the desired weight, and counts +the number of elements to be exported. Finally, the fourth traversal +constructs the export list. +

+Data structures

+The implementation of the refinement tree algorithm uses three data +structures which are contained in reftree/reftree.h. Zoltan_Reftree_data_struct +is the structure pointed to by zz->LB.Data_Structure. It contains a pointer +to the refinement tree root and a pointer to the hash table. +Zoltan_Reftree_hash_node is an entry in the hash table. It consists of a global ID, +a pointer to a refinement tree node, and a "next" pointer from which +linked lists at each table entry are constructed to handle collisions. +Zoltan_Reftree_Struct is +a node of the refinement tree. It contains the global ID, local ID, +pointers to the children, weight and summed weights, vertices of the +element, "in" and "out" vertex, a flag to indicate if this element is +assigned to this processor, and the new part number. +

+Parameters

+There are two parameters. REFTREE_HASH_SIZE determines the size of +the hash table. +REFTREE_INITPATH determines which +algorithm to use to find a path through the initial elements. +Both are set by Zoltan_Reftree_Set_Param in the file reftree/reftree_build.c. +

+Main routine

+The main routine is Zoltan_Reftree_Part in file reftree/reftree_part.c. +

+


+
[Table of Contents  +|  Next:   Hilbert Space-Filling Curve (HSFC) +|  Previous:  Hypergraph Partitioning  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_rib.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_rib.html new file mode 100644 index 00000000..a68842ae --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_rib.html @@ -0,0 +1,146 @@ + + + + + + + + + Zoltan Developer's Guide: RIB + + + + + + +

+Appendix: Recursive Inertial Bisection (RIB)

+ +

+Outline of Algorithm

+ +

The implementation of Recursive Inertial Bisection (RIB) in Zoltan is due +to Bruce Hendrickson and Robert Leland of Sandia National Laboratories for +use in Chaco +and was modified by Courtenay Vaughan. RIB is an algorithm +similar to RCB (see the appendix on RCB for +a description of RCB) in that it uses the coordinates of the objects to be +balanced to do the load balancing. Similarly to RCB, the domain is +recursively divided into two pieces until the number of subdomains needed is +reached. In each stage of the division, the direction of the principal axis +of the domain to be divided is calculated by determining an eigenvector of +the inertial matrix. This direction vector is used to define a normal to a +plane which is used to divide the domain into two pieces. This process is +repeated until the desired number of subdomains is reached. + 

The communication of objects being divided is handled by the same routine +as is used by RCB. For applications which +wish to add more objects to the decomposition at a later time +(e.g., through Zoltan_LB_Box_Assign or +Zoltan_LB_Point_Assign), +information to +do this can be retained during the decomposition phase. This information is +kept if the parameter KEEP_CUTS is set during the decomposition. +The process is similar to that used for RCB, but the +information kept is different. For each RIB cut, the center of mass +of the subdomain which is cut, the direction vector, and a distance from +the center of mass to the cutting plane have to be saved. + +
  + +

+Data Structure Definitions

+ +

There are three major data structures in RIB and they are defined in +rcb/rib.h and rcb/shared.h. The points which are being load balanced are represented as a +structure Dot_Struct which contains the location of the point, its weight, and +the originating processor's number. The nodes on the decomposition tree are +represented by the structure rib_tree which contains the position of the cut, +the center of mass of the subdomain which is being cut, the direction vector +of the principal axis of the subdomain, and the node's parent and two +children (if they exist) in the tree. The structure RIB_Struct is the RIB data +structure which holds pointers to all of the other data structures needed for +RIB. It contains an array of Dot_Struct to represent the points being load +balanced, global and local IDs of the points, an array of rib_tree (whose length is the number of processors) which +contains the decomposition tree, and the dimension of the problem. + 
  + +

+Parameters

+ +

The parameters used by RIB and their default values are described in the +RIB section of the Zoltan User's +Guide. These can be set by use of the Zoltan_RIB_Set_Param subroutine +in the file rcb/rib.c. +

+When the parameter REDUCE_DIMENSIONS +is specified, the RIB algorithm will perform lower dimensional +partitioning if the geometry is found to be degenerate. More information +on detecting degenerate +geometries may be found in another +section. + + +
  + +

+Main Routine

+ +

The main routine for RIB is Zoltan_RIB in the file rcb/rib.c. + +
  +
  +
  + +

+


+
[Table of +Contents  |  Next:  ParMETIS and +Jostle  |  Previous:  Recursive +Coordinate Bisection (RCB)  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services.html new file mode 100644 index 00000000..70d31666 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services.html @@ -0,0 +1,78 @@ + + + + + + + + + Zoltan Developer's Guide: Services + + + + + + +

+Services

+Within Zoltan, several services are provided to simplify development of new algorithms +in the library.  Each service consists of a routine or set of routines that is +compiled directly into Zoltan.  Use of these services makes debugging easier +and provides a uniform look to the algorithms in the library.  The +services available are listed below. +
+
Parameter Setting Routines +
Parallel Computing Routines +
Object List Function +
Hash Function +
Timing Routines +
Debugging Services
+ +
+
[Table of Contents  |  Next:  +Parameter Setting Routines  |  Previous:  Data Structures  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_debug.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_debug.html new file mode 100644 index 00000000..81af948d --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_debug.html @@ -0,0 +1,278 @@ + + + + + + + + + + Zoltan Developer's Guide: Debugging Services + + + + + + +

+Debugging Services

+Execution of code for debugging can be controlled by algorithm specific +parameters or by the Zoltan key parameter DEBUG_LEVEL. +The value of the Debug_Level field of the Zoltan_Struct +structure can be tested to determine whether the user desires debugging +information.  Several constants (ZOLTAN_DEBUG_*) are defined in +zz/zz_const.h; the Debug_Level field should be compared to +these values so that future changes to the debugging levels can be made +easily.  An example is included below. +

Several macros for common debugging operations are provided.  The +macros can be used to generate function trace information, such as when +control enters or exits a function or reaches a certain point in the execution +of a function. +

ZOLTAN_TRACE_ENTER +
ZOLTAN_TRACE_EXIT +
ZOLTAN_TRACE_DETAIL
+These macros produce output depending upon the value of the DEBUG_LEVEL +parameter set in Zoltan by a call to Zoltan_Set_Param.   +The macros are defined in zz/zz_const.h. +

Examples of the use of these macros can +be found below and in lb/lb_balance.c +and rcb/rcb.c. +

+ + +


+ +
+ZOLTAN_TRACE_ENTER(struct Zoltan_Struct +*zz, char *function_name); +
+
+ZOLTAN_TRACE_ENTER prints to stdout a message stating that +a given processor is entering a function.  The call to the macro should +be included at the beginning of major functions for which debugging information +is desired.  Output includes the processor number and the function +name passed as an argument to the macro.  The amount of output produced +is controlled by the value of the DEBUG_LEVEL +parameter set in Zoltan by a call to Zoltan_Set_Param. +
  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments:
    zz: Pointer to a Zoltan structure.
    function_name: Character string containing the function's name.
Output:
 ZOLTAN (Processor #) Entering function_name
+ +

+ + +


+ +
+ZOLTAN_TRACE_EXIT(struct Zoltan_Struct +*zz, char *function_name); +
+
+ZOLTAN_TRACE_EXIT prints to stdout a message stating that +a given processor is exiting a function.  The call to the macro should +be included at the end of major functions (and before return statements) +for which debugging information is desired.  Output includes the processor +number and the function name passed as an argument to the macro.  +The amount of output produced is controlled by the value of the DEBUG_LEVEL +parameter set in Zoltan by a call to Zoltan_Set_Param. +
  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments:
    zz: Pointer to a Zoltan structure.
    function_name: Character string containing the function's name.
Output:
 ZOLTAN (Processor #) Leaving function_name
+ +

+ + +


+ +
+ZOLTAN_TRACE_DETAIL(struct +Zoltan_Struct +*zz, char *function_name, char *message); +
+
+ZOLTAN_TRACE_DETAIL prints to stdout a message specified +by the developer.   It can be used to indicate how far execution +has progressed through a routine.  It can also be used to print values +of variables.  See the example below.  Output includes the processor +number, the function name passed as an argument to the macro, and a user-defined +message passed to the macro.  The amount of output produced is controlled +by the value of the DEBUG_LEVEL +parameter set in Zoltan by a call to Zoltan_Set_Param. +
  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments:
    zz: Pointer to a Zoltan structure.
    function_name: Character string containing the function's name.
    message: Character string containing a message defined by the developer.
Output:
 ZOLTAN (Processor #)  function_name: message
+ +
  +
  + + +

+


+
Example: +
An example using the debugging macros is shown below.
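The original example is not reproduced here; the following hedged sketch shows the intended pattern. ZOLTAN_DEBUG_ALL stands in for one of the ZOLTAN_DEBUG_* constants mentioned above, num_obj is a hypothetical local value, and ZOLTAN_OK is used as the usual success return code.

    static int Zoltan_Example_Fn(struct Zoltan_Struct *zz, int num_obj)
    {
      char *yo = "Zoltan_Example_Fn";
      char msg[256];

      ZOLTAN_TRACE_ENTER(zz, yo);          /* "ZOLTAN (Processor #) Entering ..." */

      /* ... body of the routine ... */

      if (zz->Debug_Level >= ZOLTAN_DEBUG_ALL) {
        sprintf(msg, "num_obj = %d", num_obj);
        ZOLTAN_TRACE_DETAIL(zz, yo, msg);  /* developer-defined message           */
      }

      ZOLTAN_TRACE_EXIT(zz, yo);           /* "ZOLTAN (Processor #) Leaving ..."  */
      return ZOLTAN_OK;
    }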
+
[Table of Contents  |  Next:  +Adding New Load-Balancing AlgorithmsPrevious:  +ZOLTAN_TIMER  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_hash.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_hash.html new file mode 100644 index 00000000..55638b36 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_hash.html @@ -0,0 +1,131 @@ + + + + + + + + + Zoltan Developer's Guide: Hash function + + + + + + + + +

+Hash Function

+Zoltan provides a hash function for global and local IDs. The hash function +computes a non-negative integer value in a certain range from an +ID. +
Zoltan_Hash  : hash a global +or local ID into non-negative integers
+ + +
+ +
+unsigned int Zoltan_Hash( +ZOLTAN_ID_PTR +key, +int num_id_entries, unsigned int n);  +
+Zoltan_Hash computes a hash value for a global or local ID. Note +that this hash function has been optimized for 32-bit integer systems, +but should  work on any machine. The current implementation uses a +simple multiplicative hash function based on Don Knuth's golden ratio method; +see The Art of  Computer Programming, vol. 3. +
  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments:
    key: A pointer to the ID to be hashed.
    num_id_entries: The length of the ID (as given by NUM_GID_ENTRIES or NUM_LID_ENTRIES).
    n: The computed hash value will be between 0 and n-1.
Return Value:
    unsigned int: The hash value (between 0 and n-1).
+ +
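A hedged usage sketch (not from the sources): hashing a global ID into one of nbuckets chains of a hash table. The names gid, nbuckets, hashtab, and new_entry are hypothetical, and zz->Num_GID is assumed to hold the NUM_GID_ENTRIES value.

    unsigned int bucket = Zoltan_Hash(gid, zz->Num_GID, nbuckets);
    new_entry->next = hashtab[bucket];     /* chain on collision */
    hashtab[bucket] = new_entry;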

+ +


+
[Table of Contents  |  Next:  +Timing Routines  |  Previous:  +Object List Function  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_objlist.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_objlist.html new file mode 100644 index 00000000..e2d9c327 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_objlist.html @@ -0,0 +1,324 @@ + + + + + + + + + Zoltan Developer's Guide: Object List function + + + + + +

+Common Functions for Querying Applications

+ +Many Zoltan algorithms need to query applications for similar data. +The following functions provide simple, uniform query functionality for +algorithm developers: +
+Zoltan_Get_Obj_List
+Zoltan_Get_Coordinates
+
+These functions provide a uniform method of calling the query functions +registered by an application. +Their use simplifies new algorithm development and code +maintenance. Usage examples are in rcb/shared.c. + +

Zoltan_Get_Obj_List can be called +from any Zoltan algorithm to obtain a list of object IDs, weights, and +part assignments. + +

Given a list of object IDs, +Zoltan_Get_Coordinates +can be called from any Zoltan algorithm to obtain a list of coordinates for +those IDs. + +

Note that, unlike most Zoltan functions, these functions allocate memory for their return lists.

+


+
int Zoltan_Get_Obj_List( +
    struct Zoltan_Struct *zz, +
    int *num_obj, +
    ZOLTAN_ID_PTR *global_ids, +
    ZOLTAN_ID_PTR *local_ids, +
    int wdim, +
    float **objwgts, +
    int **parts); +
Zoltan_Get_Obj_List returns arrays of global and local IDs, +part assignments, and +object weights (if +OBJ_WEIGHT_DIM +is not zero) for all objects on a processor. It is a convenient function +that frees algorithm developers from calling +ZOLTAN_OBJ_LIST_FN, +ZOLTAN_FIRST_OBJ_FN, +ZOLTAN_NEXT_OBJ_FN, +and +ZOLTAN_PART_FN +query functions directly. +
  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments:
    zzA pointer to the Zoltan structure created by Zoltan_Create.
    num_objUpon return,  the number of objects.
    global_idsUpon return, an array of global IDs of objects on the current processor.
    local_idsUpon return, an array of local IDs of objects on the current processor. +NULL is returned when +NUM_LID_ENTRIES +is zero.
    wdimThe number of weights associated with an object (typically 1), or 0 +if weights are not requested.
    objwgtsUpon return, an array of object weights. Weights for object i +are stored in +objwgts[i*wdim:(i+1)*wdim-1], for +i=0,1,...,num_obj-1. +If wdim is zero, the return value of objwgts is undefined and may +be NULL.
    partsUpon return, an array of part assignments. Object i +is currently in part parts[i]. +
Returned value:
Error code.
Required Query Functions:
ZOLTAN_NUM_OBJ_FN
ZOLTAN_OBJ_LIST_FN +or ZOLTAN_FIRST_OBJ_FN/ZOLTAN_NEXT_OBJ_FN +pair
Optional Query Functions:
ZOLTAN_PART_FN
+ +
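A rough usage sketch, loosely following the pattern in rcb/shared.c, is given below. Error handling is abbreviated, and the field zz->Obj_Weight_Dim is assumed to hold the OBJ_WEIGHT_DIM value.

    /* Hedged sketch: gather object IDs, weights, and part assignments. */
    int num_obj, ierr;
    ZOLTAN_ID_PTR global_ids = NULL, local_ids = NULL;
    float *objwgts = NULL;
    int *parts = NULL;
    int wdim = zz->Obj_Weight_Dim;   /* assumed field storing OBJ_WEIGHT_DIM */

    ierr = Zoltan_Get_Obj_List(zz, &num_obj, &global_ids, &local_ids,
                               wdim, &objwgts, &parts);
    if (ierr != ZOLTAN_OK) {
      /* handle the error; free any arrays that were allocated */
    }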

+


+
int Zoltan_Get_Coordinates( +
    struct Zoltan_Struct *zz, +
    int num_obj, +
    ZOLTAN_ID_PTR global_ids, +
    ZOLTAN_ID_PTR local_ids, +
    int *num_dim, +
    double **coords); +
+Given lists of object IDs, +Zoltan_Get_Coordinates returns the dimensionality of the problem and +an array of coordinates of the objects. It is a convenient function +that frees algorithm developers from calling +ZOLTAN_NUM_GEOM_FN, +ZOLTAN_GEOM_MULTI_FN, +and +ZOLTAN_GEOM_FN +query functions directly. +
  + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments:
    zzA pointer to the Zoltan structure created by Zoltan_Create.
    num_objThe number of objects.
    global_idsAn array of global IDs of objects on the current processor.
    local_idsAn array of local IDs of objects on the current processor. +local_ids is NULL when +NUM_LID_ENTRIES +is zero.
    num_dimUpon return, the number of coordinates for each object (typically 1, 2 or +3). +
    coordsUpon return, an array of coordinates for the objects. Coordinates for +object i +are stored in +coords[i*num_dim:(i+1)*num_dim-1], for +i=0,1,...,num_obj-1. +
Returned value:
Error code.
Required Query Functions:
ZOLTAN_NUM_GEOM_FN
+ZOLTAN_GEOM_MULTI_FN +or +ZOLTAN_GEOM_FN +
+
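Continuing the sketch above for Zoltan_Get_Obj_List, the coordinates of the same objects might then be obtained as shown below; since both routines allocate their return arrays, the caller must free them when finished.

    /* Hedged sketch: fetch coordinates for the objects returned by
       Zoltan_Get_Obj_List in the previous sketch. */
    int num_dim;
    double *coords = NULL;

    ierr = Zoltan_Get_Coordinates(zz, num_obj, global_ids, local_ids,
                                  &num_dim, &coords);
    if (ierr == ZOLTAN_OK) {
      /* coordinates of object i are coords[i*num_dim] .. coords[(i+1)*num_dim - 1] */
    }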

+


+
[Table of Contents  |  Next:  +Hash Function  |  Previous:  +Parallel Routines  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_parallel.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_parallel.html new file mode 100644 index 00000000..c524ba20 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_parallel.html @@ -0,0 +1,254 @@ + + + + + + + + + + Zoltan Developer's Guide: Parallel Routines + + + + + + + +

+Parallel Computing Routines

+Parallel computing utilities are described in this section. +
Zoltan_Print_Sync_Start +/ Zoltan_Print_Sync_End:  provide +synchronization of processors for I/O (with example). +
Zoltan_Print_Stats : print statistics +about a parallel variable.
+ + +
+ +
+void Zoltan_Print_Sync_Start(MPI_Comm communicator, +int do_print_line);  +
+ +

The Zoltan_Print_Sync_Start function is adapted from work of John Shadid for the MPSalsa project at Sandia National Laboratories. With Zoltan_Print_Sync_End, it provides synchronization so that one processor in the Zoltan communicator can complete its I/O before the next processor begins its I/O. This synchronization utility is useful for debugging algorithms, as it allows the output from processors to be produced in an organized manner. It is, however, a serializing process and thus does not scale well to large numbers of processors.

Zoltan_Print_Sync_Start should be called by each processor in the MPI communicator before the desired I/O is performed. Zoltan_Print_Sync_End is called by each processor after the I/O is performed. No communication can be performed between calls to Zoltan_Print_Sync_Start and Zoltan_Print_Sync_End. See the example below for usage of Zoltan_Print_Sync_Start.
  + + + + + + + + + + + + + + + + + + +
Arguments:
    communicatorThe MPI communicator containing all processors to participate in the +synchronization. +
    do_print_lineA flag indicating whether to print a line of "#" characters before +and after the synchronization block. If do_print_line is TRUE, a +line is printed; no line is printed otherwise.
+  + +

+ + +


+ +
+void Zoltan_Print_Sync_End(MPI_Comm communicator, +int do_print_line);  +
+ +

The Zoltan_Print_Sync_End function is adapted from work of John Shadid for the MPSalsa project at Sandia National Laboratories. With Zoltan_Print_Sync_Start, it provides synchronization so that one processor in the Zoltan communicator can complete its I/O before the next processor begins its I/O. This synchronization utility is useful for debugging algorithms, as it allows the output from processors to be produced in an organized manner. It is, however, a serializing process and thus does not scale well to large numbers of processors.

Zoltan_Print_Sync_Start should be called by each processor in the MPI communicator before the desired I/O is performed. Zoltan_Print_Sync_End is called by each processor after the I/O is performed. No communication can be performed between calls to Zoltan_Print_Sync_Start and Zoltan_Print_Sync_End. See the example below for usage of Zoltan_Print_Sync_End.
  + + + + + + + + + + + + + + + + + + +
Arguments:
    communicatorThe MPI communicator containing all processors to participate in the +synchronization. +
    do_print_lineA flag indicating whether to print a line of "#" characters before +and after the synchronization block. If do_print_line is TRUE, a +line is printed; no line is printed otherwise.
+ + +


+ +
+void Zoltan_Print_Stats(MPI_Comm comm, int debug_proc, +double x, char *msg); +
+
+
Zoltan_Print_Stats is a very simple routine that computes the maximum and +sum of the variable x over all processors associated with the MPI +communicator comm. +It also computes and prints the imbalance +of x, that is, the maximum value divided by the average minus one. +If x has the same value on all processors, the imbalance is zero. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments:
    comm The MPI Communicator to be used in maximum and sum operations. +
    debug_proc The processor from which output should be printed. +
    xThe variable of which one wishes to display statistics.
    msgA string that typically describes the meaning of x.
+ +
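For example, a sketch that reports the spread in the number of objects per processor might look like the following; num_local_obj is a hypothetical per-processor count, and zz->Communicator and zz->Debug_Proc are the usual fields of the Zoltan structure.

    /* Hedged sketch: print the maximum, sum, and imbalance of the
       per-processor object count (num_local_obj is hypothetical). */
    double nobj = (double) num_local_obj;
    Zoltan_Print_Stats(zz->Communicator, zz->Debug_Proc, nobj,
                       "Number of objects per processor:");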


+

+ + + +
Example Using Zoltan_Print_Sync_Start/Zoltan_Print_Sync_End

+  +
+ + + + + +
...  +
if (zz->Debug_Level >= ZOLTAN_DEBUG_ALL) {  +
    int i;  +
    Zoltan_Print_Sync_Start(zz->Communicator, TRUE);  +
    printf("Zoltan: Objects to be exported from Proc +%d\n", zz->Proc);  +
    for (i = 0; i < *num_export_objs; i++) {  +
        printf("    +Obj: "); +
        ZOLTAN_PRINT_GID(zz, &((*export_global_ids)[i*zz->Num_GID])); +
        printf(" Destination: %4d\n",  +
              +(*export_procs)[i]);  +
    }  +
    Zoltan_Print_Sync_End(zz->Communicator, TRUE);  +
}
Example usage of Zoltan_Print_Sync_Start and Zoltan_Print_Sync_End +to synchronize output among processors.  (Taken from Zoltan_LB_Partition in +lb/lb_balance.c.)
+  + +

+


+
[Table of Contents  |  Next:  +Object List Function  |  Previous:  +Parameter Setting Routines  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_params.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_params.html new file mode 100644 index 00000000..2712706e --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_params.html @@ -0,0 +1,504 @@ + + + + + + + + + + Zoltan Developer's Guide: Parameter Setting Routines + + + + + + + +

+Parameter Setting Routines

+Zoltan allows applications to change a number of parameter settings at +runtime. This facility supports debugging by, for instance, allowing control +over the type and quantity of output. It also allows users to modify some +of the parameters that characterize the partitioning algorithms. The design +of the parameter setting routines was driven by several considerations. +First, we wanted to keep the user interface as simple as possible. Second, +we wanted to allow different Zoltan structures to have different +parameter settings associated with them. This second consideration precluded +the use of C's static global variables (except in a few special places). +The parameter routines described below allow developers to provide runtime +access to any appropriate variables. In some cases, +it is appropriate to allow developers to tinker with parameters that will +never be documented for users. +

Our solution to parameter setting is to have a single interface routine +Zoltan_Set_Param. +This function calls a set of more domain-specific parameter setting routines, +each of which is responsible for a domain-specific set of parameters. Assuming +there are no errors, the parameter name and new value are placed in a linked +list of new parameters which is maintained by the Zoltan structure. +When a partitioning method is invoked on a Zoltan structure, it +scans through this linked list using the Zoltan_Assign_Param_Vals +function, resetting parameter values that are appropriate to the method. +

In addition to the method-specific parameters, Zoltan also has a set +of so-called key parameters. These +are normally stored in the Zoltan structure and may be accessed by +any part of the Zoltan code (including all the methods). A list of the +key +parameters currently used in Zoltan can be found in the User's Guide. +

The routines that control parameter setting are listed below. Note that +these routines have been written to be as independent of Zoltan as possible. +Only a few minor changes would be required to use these routines as a separate +library. +

+Zoltan_Set_Param:  User +interface function that calls a set of method-specific routines. +
Zoltan_Set_Param_Vec:  +Same as Zoltan_Set_Param, but for vector parameters. +
Zoltan_Check_Param:  Routine +to check if parameter name and value are OK. +
Zoltan_Bind_Param: Routine to associate a parameter name with a variable. +
Zoltan_Bind_Param_Vec: Same as Zoltan_Bind_Param, but for vector parameters. +
Zoltan_Assign_Param_Vals:  +Scans list of parameter names & values, setting relevant parameters +accordingly. +
Zoltan_Free_Params: Frees a parameter +list.
+See also:  Adding new parameters in +Zoltan. +

+ + +


+ +
+int Zoltan_Set_Param(struct Zoltan_Struct +*zz, char *param_name, char *new_val); +
+
+

The Zoltan_Set_Param function is the user interface for parameter setting. Its principal purpose is to call a sequence of more domain-specific routines for setting domain-specific parameters (e.g., Zoltan_RCB_Set_Param). If you are adding algorithms to Zoltan, you must write one of these domain-specific parameter routines and modify Zoltan_Set_Param to call it. Zoltan_RCB_Set_Param can serve as a template for this task. The arguments to this routine are two strings, param_name and new_val. The domain-specific routines return an integer value with the following meaning.

0 - The parameter name was found, and the value passed all +error checks. +
1 - The parameter name was not found among the parameters known by +the domain-specific routine. +
2 - The parameter name was found, but the value failed the error checking. +
3 - Same as 0, but do not add parameter and value to linked list. +
Other - More serious error; value is an error +code.
+If one of the domain-specific parameter routines returns with a 0, +Zoltan_Set_Param +adds the parameter and the value (both strings) to a linked list of such +pairs that is pointed to by the Params field of the zz structure. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments:
   zzThe Zoltan structure whose parameter value is being modified.
   param_nameA string containing the name of the parameter being modified. It is +automatically converted to all upper-case letters.
   new_valThe new value desired for the parameter, expressed as a string.
Returned Value:
   intError code.
+ + + +
+ +
+int Zoltan_Set_Param_Vec(struct Zoltan_Struct +*zz, char *param_name, char *new_val, int index); +
+
+

+This routine works the same way as +Zoltan_Set_Param, but is used for vector parameters. +A vector parameter is a parameter that in +addition to a name also has a set of indices, usually starting at 0. Each entry (component) may have a different value. This routine +sets a single entry (component) of a vector parameter. If you want all entries (components) of a vector parameter to have the +same value, set the parameter using +Zoltan_Set_Param as if it were a scalar parameter. + + +


+ +
+int Zoltan_Check_Param( char *param_name, +char *new_val, PARAM_VARS *params, PARAM_UTYPE +*result, +int *matched_index); +
+
The Zoltan_Check_Param routine simplifies the task of writing your +own domain-specific parameter setting function. Zoltan_Check_Param compares +the param_name string against a list of strings that you provide, +and if a match is found it extracts the new value from the new_val +string. See Zoltan_RCB_Set_Param for an example of how to use this routine. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments:
   param_nameA capitalized string containing the name of the parameter being modified.
   new_valThe new value desired for the parameter, expressed as a string.
   paramsThe data structure (defined in params/params_const.h) describing +the domain-specific parameters to be matched against. The data structure +is an array of items, each of which consists of four fields. The first +field is a string that is a capitalized name of a parameter. The second +field is an address that is unused in +Zoltan_Check_Param, but is used +in Zoltan_Assign_Param_Vals. The +third field is another capitalized string that indicates the type of the +parameter from the first field. Currently supported types are "INT", "INTEGER", +"FLOAT", "REAL", "DOUBLE", "LONG", "STRING" and "CHAR". It is easy to add additional types +by simple modifications to Zoltan_Check_Param and Zoltan_Assign_Param_Vals. +The fourth field is an integer that gives the dimension (length) of +the parameter, if it is a vector parameter. Scalar parameters have dimension 0. +The array is terminated by an item consisting of four NULL fields. See +Zoltan_RCB_Set_Param +for an example of how to set up this data structure. 
   resultStructure of information returned by Zoltan_Check_Param (defined +in params/params_const.h).  If param_name matches any +of the parameter names from the first field of the params data structure, + +Zoltan_Check_Param attempts to decode the value in new_val. +The type of the value is determined by the third field in the params +data structure. If the value decodes properly, it is returned in result
   matched_indexIf param_name matches, then matched_index returns the +index into the params array that corresponds to the matched parameter +name. The matched_index and result values allow the developer +to check that values being assigned to parameters are valid. 
Returned Value:
   int0 - param_name found in params data structure and new_val +decodes OK.  +
1 - param_name not found in params data structure.  +
2 - param_name found in params data structure but new_val +doesn't decode properly. 
+ + + +
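To make the pattern concrete, here is a minimal sketch of a domain-specific parameter routine, modeled loosely on the Zoltan_RCB_Set_Param template mentioned above. The MYALG parameter names and the routine name are hypothetical; PARAM_VARS and PARAM_UTYPE come from params/params_const.h, and each entry follows the four-field layout (name, bound address, type, vector length) described for the params argument.

    /* Hedged sketch: parameter table for a hypothetical method MYALG.
       Addresses are filled in later by Zoltan_Bind_Param; the list is
       terminated by a NULL entry. */
    static PARAM_VARS MyAlg_params[] = {
      { "MYALG_OVERALLOC",    NULL, "DOUBLE", 0 },
      { "MYALG_OUTPUT_LEVEL", NULL, "INT",    0 },
      { NULL,                 NULL, NULL,     0 }
    };

    /* Domain-specific routine to be called from Zoltan_Set_Param.
       Returns 0, 1, or 2 according to the codes listed above. */
    int Zoltan_MyAlg_Set_Param(char *name, char *val)
    {
      int status, index;
      PARAM_UTYPE result;

      status = Zoltan_Check_Param(name, val, MyAlg_params, &result, &index);
      /* Additional range checks on result could be performed here before
         accepting the value. */
      return status;
    }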
+ +
+int Zoltan_Bind_Param (PARAM_VARS +*params, char *name, void *var); +
+
+
This routine is used to associate the name of a parameter in the parameter +array params with a variable pointed to by var. +
Note that since the variable to be bound can be of an arbitrary type, +the pointer should be cast to a void pointer. Zoltan_Bind_Param must +be called before Zoltan_Assign_Param_Vals, +where the actual assignment of values takes place. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments:
   paramsThe data structure describing the domain-specific parameters to be +matched against. The data structure is an array of items, each of which +consists of four fields. The first field is a string that is a capitalized +name of a parameter. The second field is an address that is unused in +Zoltan_Check_Param, +but is used in Zoltan_Assign_Param_Vals. +The third field is another capitalized string that indicates the type +of the parameter from the first field. Currently supported types are "INT", +"INTEGER", "FLOAT", "REAL", "DOUBLE", "LONG", "STRING" and "CHAR". +It is easy to add additional types by simple modifications to Zoltan_Check_Param +and Zoltan_Assign_Param_Vals. +The fourth field is an integer that gives the dimension (length) of +the parameter, if it is a vector parameter. Scalar parameters have dimension 0. +The array is terminated by an item consisting of four NULL fields.
   nameA capitalized string containing the name of the parameter being modified.
   varA pointer to the variable  you wish to associate with the +parameter name name.  The pointer should be type cast to a +void pointer. The user is responsible for ensuring that the pointer really +points to a variable of appropriate type.  A NULL pointer may be used +to "unbind" a variable such that it will not be assigned a value upon future +calls to Zoltan_Assign_Param_Vals.
Returned Value:
   intError code.
+ +

+ + + +


+ +
+int Zoltan_Bind_Param_Vec(PARAM_VARS +*params, char *name, void *var, int dim); +
+
+

+Same as Zoltan_Bind_Param, but for vector parameters. The additional +parameter dim gives the dimension or length of the vector parameter. +

+ + +
+ +
+int Zoltan_Assign_Param_Vals(PARAM_LIST +*change_list, PARAM_VARS *params, +int debug_level, +int my_proc, int debug_proc); +
+
This routine changes parameter values as specified by the list of names +and new values which is associated with a Zoltan structure. To +use this routine, parameter values should first be set to their defaults, +and then +Zoltan_Assign_Param_Vals should be called to alter the values +as appropriate. See Zoltan_RCB for a template. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments:
   change_listThe linked list of parameter names and values which is constructed +by Zoltan_Set_Param and is a field of an +Zoltan_Struct +data structure (defined in params/param_const.h). 
    paramsThe data structure (defined in params/params_const.h) describing the domain-specific parameters to be matched against. The data structure is an array of items, each of which consists of four fields. The first field is a string which is a capitalized name of a parameter. The second field is an address of the parameter which should be altered. The third field is another capitalized string which indicates the type of the parameter being altered. Currently supported types are "INT", "INTEGER", "FLOAT", "REAL", "DOUBLE", "LONG", "STRING" and "CHAR". It is easy to add additional types by simple modifications to Zoltan_Check_Param and Zoltan_Assign_Param_Vals. The fourth field is an integer giving the dimension (length) of the parameter if it is a vector parameter; scalar parameters have dimension 0. The array is terminated by an item consisting of four NULL fields. 
   debug_levelZoltan debug level. (Normally this is zz->Debug_Level.)
   my_procProcessor number. (Normally this is zz->Proc.)
   debug_procProcessor number for debugging. (Normally this is zz->Debug_Proc.)
Returned Value:
    intError code.
+The last three input parameters may seem strange. They are present to support Zoltan's debugging features. If the parameter utility code is used outside of Zoltan, these parameters may be removed, or the corresponding input values may simply be set to zero in the function call.
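Continuing the hypothetical MYALG sketch above, the binding and assignment steps inside the method itself might look roughly as follows; the default values are illustrative, and the zz fields are those noted in the argument descriptions.

    /* Hedged sketch: set defaults, bind variables to parameter names, then
       apply any values the user supplied through Zoltan_Set_Param. */
    double overalloc = 1.2;    /* hypothetical default */
    int output_level = 0;      /* hypothetical default */
    int ierr;

    Zoltan_Bind_Param(MyAlg_params, "MYALG_OVERALLOC",    (void *) &overalloc);
    Zoltan_Bind_Param(MyAlg_params, "MYALG_OUTPUT_LEVEL", (void *) &output_level);

    ierr = Zoltan_Assign_Param_Vals(zz->Params, MyAlg_params,
                                    zz->Debug_Level, zz->Proc, zz->Debug_Proc);
    /* overalloc and output_level now hold either the defaults above or the
       user-supplied values. */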

+ + +


+ +
+void Zoltan_Free_Params (PARAM_LIST +**param_list ); +
+
This routine frees the parameters in the list pointed to by param_list. +
  + + + + + + + + + + + + +
Arguments:
   param_listA pointer to a list (array) of parameters to be freed.  PARAM_LIST +is defined in params/param_const.h.
+ +

+ + +


+
[Table of Contents  |  Next:  +Parallel Computing Routines  |  Previous:  +Services  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_time.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_time.html new file mode 100644 index 00000000..e65bf983 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_time.html @@ -0,0 +1,203 @@ + + + + + + + + + Zoltan Developer's Guide: Timing Routines + + + + + +

+Timing Routines

+To assist in performance measurements and profiling, several timing routines +are included in the Zoltan library. The main timer function, +Zoltan_Time, +provides access to at least two portable timers: one CPU clock and one +wall clock. On most systems, user time can also be measured. +A higher-level timing capability built using +Zoltan_Time is also available; see +ZOLTAN_TIMER for more +details. +

+The routines included in the utility are listed below. +

Zoltan_Time: Returns the +time (in seconds) after some fixed reference point in time. +
Zoltan_Time_Resolution:  +The resolution of the specified timer.
+Currently, the following timers are supported: +
    +
  • ZOLTAN_TIME_WALL : wall-clock time. On most systems, this timer calls MPI_Wtime.
  • ZOLTAN_TIME_CPU : cpu time. On most systems, this timer calls the ANSI C function clock(). Note that this timer may roll over at just 71 minutes. Zoltan_Time attempts to keep track of the number of roll-overs, but this feature will work only if Zoltan_Time is called at least once during every period between roll-overs.
  • ZOLTAN_TIME_USER : user time. On most systems, this timer calls times(). Note that times() is required by POSIX and is widely available, but it is not required by ANSI C, so it may be unavailable on some systems. Compile Zoltan with -DNO_TIMES in this case.
+Within Zoltan, it is recommended to select which timer to use by setting +the +TIMER general parameter +via Zoltan_Set_Param. +The default value of TIMER +is wall.  Zoltan stores an integer representation of the selected +timing method in zz->Timer. This value should be passed to Zoltan_Time, +as in Zoltan_Time(zz->Timer). +

+


+
double Zoltan_Time(int timer);  +
+

Zoltan_Time returns the time in seconds, measured from some fixed +reference time. Note that the time is not synchronized among different +processors or processes. The time may be either CPU time or wall-clock +time. The timer is selected through Zoltan_Set_Param. +
  + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments:
   timerThe timer type (e.g., wall or cpu) represented as an integer. See top +of page for a list of valid values.
Returned Value:
   double The time in seconds. The time is always positive; a negative value +indicates an error.
+ +

+


+
double Zoltan_Time_Resolution(int +timer) ;  +
+
Zoltan_Time_Resolution returns the resolution of the current +timer.  The returned resolution is a lower bound on the actual resolution. +
  + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments:
   timerThe timer type (e.g., wall or cpu) represented as an integer. See top +of page for a list of valid values.
Returned Value:
   double The timer resolution in seconds.  If the resolution is unknown, +-1 is returned.
+ +
  +

+


+
Example: +
Here is a simple example for how to use the timer routines: +
    double t0, t1, t2; +
    Zoltan_Set_Param(zz, "TIMER", "wall"); +
    t0 = Zoltan_Time(zz->Timer); +
    /* code segment 1  */ +
    t1 = Zoltan_Time(zz->Timer); +
    /* code segment 2  */ +
    t2 = Zoltan_Time(zz->Timer); +
    /* Print timing results */ +
    Zoltan_Print_Stats(zz->Communicator, zz->Debug_Proc, t1-t0, "Time +for part 1:"); +
    Zoltan_Print_Stats(zz->Communicator, zz->Debug_Proc, t2-t1, "Time +for part 2:"); +
    Zoltan_Print_Stats(zz->Communicator, zz->Debug_Proc, t2-t0, "Total +time :");
+ +
+
[Table of Contents  |  Next:  +Debugging ServicesPrevious:  +Hash Function  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_zoltantimer.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_zoltantimer.html new file mode 100644 index 00000000..ca195864 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_services_zoltantimer.html @@ -0,0 +1,1042 @@ + + + + + + + + + Zoltan Developer's Guide: ZoltanTimer Utilities + + + + + + + +

+High-Level Timing Services: ZOLTAN_TIMER

+The ZOLTAN_TIMER provides high-level capability for instrumenting code with timers and reporting the execution times measured. The ZOLTAN_TIMER can store many separate timers within a single ZOLTAN_TIMER object and associate a name with each timer for ease of reporting. It can perform parallel synchronization among processors to ensure that all time is attributed to the correct section of the timed code. Its output contains the maximum, minimum and average times over sets of processors.

+The ZOLTAN_TIMER uses +Zoltan_Time +to obtain the current clock time. +

+NOTE: The current implementation of ZOLTAN_TIMER relies on two assumptions +to work correctly. +

    +
  1. +ZOLTAN_TIMER assumes that individual timers within a single +ZOLTAN_TIMER object are initialized in the same order on all processors. +Times over multiple processors are accrued based on the value of the +timer_idx returned by Zoltan_Timer_Init, +so for accurate timing, +each processor should associate the same value of timer_idx with +the same section of code. +

    +

  2. +ZOLTAN_TIMER uses synchronization in +Zoltan_Timer_Print and +Zoltan_Timer_PrintAll, and optionally in +ZOLTAN_TIMER_START and +ZOLTAN_TIMER_STOP. For these routines to +work properly, then, all processors must call them at the same place in +the code to satisfy the synchronization. A possible workaround is to +provide MPI_COMM_SELF as an argument to these functions for single-processor +timing. +
+

+In future work, these constraints may be weakened so that, for instance, +different processors may have different numbers of timers or skip +synchronization points. +

+

+


+ + + + + + + + + + + + + + + + + + + + + + + +
+Source code location: + +Utilities/Timer +
+Function prototypes file: + +Utilities/Timer/zoltan_timer.h or include/zoltan_timer.h +Utilities/Timer/zoltan_timer_cpp.h or include/zoltan_timer_cpp.h +
+Library name: + +libzoltan_timer.a +
+Other libraries used by this library: + +libmpi.a and libzoltan_mem.a. +
+Routines: +
+ Zoltan_Timer_Create: Creates a ZOLTAN_TIMER object to store timers.
+ Zoltan_Timer_Init: Initializes a new timer.
+ ZOLTAN_TIMER_START: Starts a single timer.
+ ZOLTAN_TIMER_STOP: Stops a single timer.
+ Zoltan_Timer_Print: Prints the values of a single timer.
+ Zoltan_Timer_PrintAll: Prints the values of all timers.
+ Zoltan_Timer_Reset: Resets a single timer.
+ Zoltan_Timer_Copy: Copies a ZOLTAN_TIMER object to newly +allocated memory.
+ Zoltan_Timer_Copy_To: Copies a ZOLTAN_TIMER object to existing memory.
+ Zoltan_Timer_Destroy: Frees a ZOLTAN_TIMER object.
+
+
+Use in Zoltan: +
+The ZOLTAN_TIMER utilities are used in Zoltan's graph and hypergraph algorithms. Timing there is activated by setting the parameter use_timers to a positive integer value.

+

+
+ + +
+ +
+ + + + + + + + + +
+C: + +struct Zoltan_Timer *Zoltan_Timer_Create( +int timer_flag); +
+C++: + +Zoltan_Timer_Object(int timer_flag); +
+
+Zoltan_Timer_Create allocates memory for storing information to be +used by the Zoltan_Timer. The pointer returned by this function is passed +to many subsequent functions. An application or Zoltan itself +may allocate more than one +Zoltan_Timer data structure; for example, a separate Zoltan_Timer may be +used in different partitioning algorithms or in different routines. +

+In the C++ interface, the Zoltan_Timer_Object class represents a Zoltan_Timer +data +structure and the functions that operate on it. It is the constructor that +allocates an instance of a Zoltan_Timer_Object. It has no return value. +

+ + + + + + + + + + + + + + + + + + + + + + + +
Input Arguments:
    timer_flag + +A flag indicating the type of timer to be used; it is passed to +calls to Zoltan_Time. +Valid values are +ZOLTAN_TIME_WALL, ZOLTAN_TIME_CPU, ZOLTAN_TIME_USER +and ZOLTAN_TIME_DEF (the default timer). See the +timing routines for more information +about the timer_flag values. +
Returned Value:
   struct Zoltan_Timer * +Pointer to memory for storage of Zoltan_Timer information. If an error occurs, +NULL will be returned in C. +
+ +

+ + +


+ +
+ + + + + + + + + +
+C: + +int Zoltan_Timer_Init( +struct Zoltan_Timer *zt, +int use_barrier, +const char *timer_name +); +
+C++: + +int Zoltan_Timer_Object::Init( +const int use_barrier, +const std::string & timer_name); +
+
+Zoltan_Timer_Init initializes a single timer within a Zoltan_Timer object. Each timer in the Zoltan_Timer object is assigned a unique integer, which is returned by Zoltan_Timer_Init and is later used to indicate which timer to start or stop. It is also used to accrue times across multiple processors. Zoltan_Timer_Init may be called several times with the same Zoltan_Timer object to create many different timers within the object.

+Note that processors must initialize multiple timers within a Zoltan_Timer +object in the same order to +ensure that the returned timer index value is the same on each processor. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Input Arguments:
   zt + +Pointer to the Zoltan_Timer struct returned by Zoltan_Timer_Create. +
   use_barrier + +Flag indicating whether to synchronize processors before starting or +stopping a timer. A value of 1 causes MPI_Barrier to be invoked before the +timer is started or stopped; a value of 0 provides no synchronization. +
   timer_name + +A character string associated with the timer; it is printed as the timer +name in Zoltan_Timer_Print and +Zoltan_Timer_PrintAll. +
Returned Value:
   int +The unique integer identifier for this timer. +
+ + +


+ +
+ + + + + + + + + +
+C: + +int ZOLTAN_TIMER_START( +struct Zoltan_Timer *zt, +int timer_idx, +MPI_COMM communicator +); +
+C++: + +int Zoltan_Timer_Object::Start( +const int timer_idx, +const MPI_COMM & communicator); +
+
+ZOLTAN_TIMER_START starts the timer with index timer_idx +associated with the Zoltan_Timer struct zt. Error checking ensures +that the timer is not already running before it is started. If the timer +timer_idx +was initialized with use_barrier=1, all processors should start +the timer at the same point in the code. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Input Arguments:
   zt + +Pointer to the Zoltan_Timer struct returned by Zoltan_Timer_Create. +
   timer_idx + +The integer timer index +(returned by Zoltan_Timer_Init) +of the timer to be started. +
   communicator + +The MPI communicator to be used for synchronization if the timer was initialized with use_barrier=1.
Returned Value:
   int +Error code indicating whether the timer started successfully. +
+ + +


+ +
+ + + + + + + + + +
+C: + +int ZOLTAN_TIMER_STOP( +struct Zoltan_Timer *zt, +int timer_idx, +MPI_COMM communicator +); +
+C++: + +int Zoltan_Timer_Object::Stop( +const int timer_idx, +const MPI_COMM & communicator); +
+
+ZOLTAN_TIMER_STOP stops the timer with index timer_idx +associated with the Zoltan_Timer struct zt. Error checking ensures +that the timer is already running before it is stopped. If the timer +timer_idx +was initialized with use_barrier=1, all processors should stop +the timer at the same point in the code. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Input Arguments:
   zt + +Pointer to the Zoltan_Timer struct returned by Zoltan_Timer_Create. +
   timer_idx + +The integer timer index +(returned by Zoltan_Timer_Init) +of the timer to be stopped. +
   communicator + +The MPI communicator to be used for synchronization if the timer was initialized with use_barrier=1.
Returned Value:
   int +Error code indicating whether the timer stopped successfully. +
+ +


+ +
+ + + + + + + + + +
+C: + +int Zoltan_Timer_Print( +struct Zoltan_Timer *zt, +int timer_idx, +int proc, +MPI_Comm comm, +FILE *fp +); +
+C++: + +int Zoltan_Timer_Object::Print( +const int timer_idx, +const int proc, +const MPI_Comm &comm, +FILE *fp +); +
+
+Zoltan_Timer_Print accrues accumulated times for a single timer +timer_idx across a communicator and computes the minimum, maximum and +average values across all processors of the MPI communicator comm. +These values, as well as the timer index timer_idx and timer name, +are then printed by processor proc. +Because of the synchronization needed to compute the minimum, maximum and +average values, Zoltan_Timer_Print must be called by all processors in +the communicator comm. Communicator MPI_COMM_SELF can be used to print +a single processor's timer values. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Input Arguments:
   zt + +Pointer to the Zoltan_Timer struct returned by Zoltan_Timer_Create. +
   timer_idx + +The integer timer index +(returned by Zoltan_Timer_Init) +of the timer to be printed. +
   proc + +The rank (within MPI communicator comm) of the processor that should +print the timer's values. +
   comm + +The MPI communicator across which minimum, maximum and average values +of the timer should be computed. +
   fp + +The file pointer to an open, writable file to which timer values should be printed. Special files stdout and stderr are also legal values for this argument.
Returned Value:
   int +Error code. +
+ + +


+ +
+ + + + + + + + + +
+C: + +int Zoltan_Timer_PrintAll( +struct Zoltan_Timer *zt, +int proc, +MPI_Comm comm, +FILE *fp +); +
+C++: + +int Zoltan_Timer_Object::PrintAll( +const int proc, +const MPI_Comm &comm, +FILE *fp +); +
+
+Zoltan_Timer_PrintAll accrues accumulated times for all timers +in zt across a communicator and computes the minimum, maximum and +average values across all processors of the MPI communicator comm. +The timer values for each timer, as well as its timer index and timer name, +are then printed by processor proc. +Because of the synchronization needed to compute the minimum, maximum and +average values, Zoltan_Timer_PrintAll must be called by all processors in +the communicator comm. Communicator MPI_COMM_SELF can be used to print +a single processor's timer values. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Input Arguments:
   zt + +Pointer to the Zoltan_Timer struct returned by Zoltan_Timer_Create. +
   proc + +The rank (within MPI communicator comm) of the processor that should +print the timer's values. +
   comm + +The MPI communicator across which minimum, maximum and average values +of the timer should be computed. +
   fp + +The file pointer to an open, writable file to which timer values should be printed. Special files stdout and stderr are also legal values for this argument.
Returned Value:
   int +Error code. +
+ + +


+ +
+ + + + + + + + + +
+C: + +int Zoltan_Timer_Reset( +struct Zoltan_Timer *zt, +int timer_idx, +int use_barrier, +const char *timer_name +); +
+C++: + +int Zoltan_Timer_Object::Reset( +const int timer_idx, +const int use_barrier, +const std::string & timer_name); +
+
+Zoltan_Timer_Reset resets the single timer represented by +timer_idx within a Zoltan_Timer +object. The accumulated time within the timer is reset to zero. +The timer's name timer_name and the flag use_barrier +indicating whether the +timer should be started and stopped synchronously across processors +may be changed as well. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Input Arguments:
   zt + +Pointer to the Zoltan_Timer struct returned by Zoltan_Timer_Create. +
   timer_idx + +The integer timer index +(returned by Zoltan_Timer_Init) +of the timer to be reset. +
   use_barrier + +Flag indicating whether to synchronize processors before starting or +stopping a timer. A value of 1 causes MPI_Barrier to be invoked before the +timer is started or stopped; a value of 0 provides no synchronization. +
   timer_name + +A character string associated with the timer; it is printed as the timer +name in Zoltan_Timer_Print and +Zoltan_Timer_PrintAll. +
Returned Value:
   int +Error code indicating whether or not the timer was reset correctly. +
+ + +


+ +
+ + + + + + + + + +
+C: + +struct Zoltan_Timer *Zoltan_Timer_Copy( +struct Zoltan_Timer *from); +
+C++: + +Zoltan_Timer_Object(const Zoltan_Timer_Object &from); +
+
+Zoltan_Timer_Copy creates a new ZOLTAN_TIMER object and copies the +state of the existing ZOLTAN_TIMER object from to the new object. +It returns the new ZOLTAN_TIMER object. +

+In C++, there is no direct interface to Zoltan_Timer_Copy. +Instead, the Zoltan_Timer_Object copy constructor invokes the C library +function Zoltan_Timer_Copy. +

+ + + + + + + + + + + + + + + + + + + + + + + +
Input Arguments:
   from + +Pointer to the Zoltan_Timer struct returned by Zoltan_Timer_Create whose state is to be copied to +new memory. +
Returned Value:
   struct Zoltan_Timer * +Pointer to memory for storage of the copied Zoltan_Timer information. +If an error occurs, NULL will be returned in C. +
+ + + +


+ +
+ + + + + + + + + +
+C: + +int Zoltan_Timer_Copy_To( +struct Zoltan_Timer **to, +struct Zoltan_Timer *from +); +
+C++: + +Zoltan_Timer_Object & operator =(const Zoltan_Timer_Object &from); +
+
+Zoltan_Timer_Copy_To copies one ZOLTAN_TIMER object to another, after first freeing any memory used by the target ZOLTAN_TIMER object and re-initializing it.

+The C++ interface to Zoltan_Timer_Copy_To is through the +Zoltan_Timer_Object copy operator +which invokes the C library +function Zoltan_Timer_Copy_To. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments:
   to + +Pointer to the Zoltan_Timer struct +whose state is to be overwritten with the state of from. +
   from + +Pointer to the Zoltan_Timer struct returned by Zoltan_Timer_Create whose state is to be copied to +to. +
Returned Value:
   int +Error code. +
+ + +


+ +
+ + + + + + + + + +
+C: + +void Zoltan_Timer_Destroy( +struct Zoltan_Timer **zt); +
+C++: + +~Zoltan_Timer_Object(); +
+
+Zoltan_Timer_Destroy frees memory allocated by +Zoltan_Timer_Create and in C, sets the +timer pointer zt to NULL. Zoltan_Timer_Destroy should be +called when an application is finished using a timer object. +

+In C++, the Zoltan_Timer_Object class represents a Zoltan_Timer +data +structure and the functions that operate on it. Zoltan_Timer_Destroy +is called by the +destructor for the Zoltan_Timer_Object. +

+ + + + + + + + + + + + + + + + + + + + + + + +
Input Arguments:
   zt + +Pointer to the pointer to the Zoltan_Timer struct returned by Zoltan_Timer_Create. Upon return, zt is set to NULL. +
Returned Value:
   None. +
+ +
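Putting the routines together, a minimal usage sketch is shown below. The communicator comm is assumed to be supplied by the caller, and the timer name is illustrative.

    /* Hedged sketch: time one section of code and report the min/max/avg
       over the MPI communicator comm. */
    struct Zoltan_Timer *zt;
    int t_idx;

    zt = Zoltan_Timer_Create(ZOLTAN_TIME_WALL);
    t_idx = Zoltan_Timer_Init(zt, 1, "my section");   /* use_barrier = 1 */

    ZOLTAN_TIMER_START(zt, t_idx, comm);
    /* ... code section to be timed ... */
    ZOLTAN_TIMER_STOP(zt, t_idx, comm);

    Zoltan_Timer_Print(zt, t_idx, 0, comm, stdout);   /* processor 0 prints */
    Zoltan_Timer_Destroy(&zt);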

+ + +

+ +


+
[Table of Contents  |  Next:  +Debugging Services  |  Previous:  +Timing Routines  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_test_script.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_test_script.html new file mode 100644 index 00000000..17c2e48c --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_test_script.html @@ -0,0 +1,229 @@ + + + + + + + + + Zoltan Developer's Guide: Running test_zoltan + + + + + + +

+Appendix:  Using the test script test_zoltan

+ +Note: The script test_zoltan as described below is obsolete. +It will work in some instances, but is no longer generally supported. +

+Automated testing is now supported through CMake. Build +Zoltan +through CMake with option +

+"-D Zoltan_ENABLE_Tests:BOOL=ON" +
+

+Automated tests can then be run with the command

+make test +
+The testing environment uses the script zoltan/test/ctest_zoltan.pl. To add new zdrive.inp files to an existing test, add the files to the appropriate directory and to the glob commands in ctest_zoltan.pl. (Follow the example of zdrive.inp.rcb in ctest_zoltan.pl.)

+To add new test directories, create the directories and populate them +with input files. Copy file CMakeLists.txt +from zoltan/test/ch_simple to the new directories, +and edit the test names and numbers of processors in the new files. +Also add the new directories to zoltan/test/CMakeLists.txt, following +the example of ch_simple. The new tests will now be run in the CMake +environment. +

+Results of automated nightly testing are posted to the CDASH +dashboard. + +


+
+OBSOLETE +

+The purpose of the Zoltan test script is to run the test driver zdrive (or zfdrive) on a set of test problems to verify that the Zoltan library works correctly. The script compares the output of actual runs with precomputed output. The assumption is that if the outputs are identical, then the current implementation is likely to be correct. Small differences may occur depending on the architectures used; developers should examine the output and use their judgement in determining its correctness. It is strongly recommended that developers run test_zoltan to verify correctness before committing changes to existing code!
  +

+How to run test_zoltan

+First make sure you have compiled the driver zdrive (or zfdrive). Then go to the Zoltan directory Zoltan/tests and type test_zoltan with suitable options as described below. This will run the test script in interactive mode. The output from the driver will be sent to stdout and stderr, along with a summary of results. The summary of results is also saved in a log file. If an error occurred, look at the log file to find out what went wrong. The script currently assumes that runs are deterministic and reproducible across all architectures, which is not necessarily true. Hence false alarms may occur.
  +

+Syntax

+     test_zoltan [-arch arch-type] [-cmd +command] +[other options as listed below] +

It is required to use either the -arch or the -cmd option.  The +other arguments are optional. +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Options:
-arch arch-typeThe architecture on which the driver is to run. For a +list of currently supported architectures, type test_zoltan with no +arguments. +
-cmd commandThe command is the command that the script +uses to launch the driver. One must include an option to specify the number +of processors as part of the command. Use quotes appropriately; for example, +-cmd +'mpirun -np'. Default settings have been provided for all the supported +architectures.
-logfile filenameThe name of the log file. The default is test_zoltan.log. +If an old log file exists, it will be moved to test_zoltan.log.old.
-no_parmetisDo not run any ParMETIS +methods.
-no_nemesisDo not run test problems in Nemesis format.
-no_chacoDo not run test problems +in Chaco format.
-yes_fortranRun the Fortran90 driver zfdrive +instead of zdrive.
+ +

The default behavior is to run zdrive with all methods on all types of input format.
  +

+Test problems

+The test problems are included in subdirectories of the Zoltan/test +directory. Problems using Chaco input files are in subdirectories +ch_*; problems using Nemesis input files are in subdirectories +nem_*. +Please see the README files located in each test directory for more +details on these test problems. +

+Load balancing methods

+Many different load-balancing methods are currently tested in test_zoltan. +Input files for the methods are found in the test problem subdirectories. +The input files are named zdrive.inp.<method>, where <method> indicates which load-balancing method is passed to Zoltan. +To run only a subset of the methods, +edit the test_zoltan script manually; searching for "rcb" shows +which lines of the script must be changed. +
  +

+Number of processors

+The script test_zoltan runs each test problem on a predetermined number of processors, currently +ranging from 3 to 9. +

+


+
[Table of Contents  |  Next: +RCB  |  Previous:  Visualization of Geometric Partitions  |  Privacy and Security] + + diff --git a/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_view.html b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_view.html new file mode 100644 index 00000000..f918fef8 --- /dev/null +++ b/Zoltan-3.90/doc/Zoltan_html/dev_html/dev_view.html @@ -0,0 +1,241 @@ + + + + + + + + + Zoltan Developer's Guide: Visualization of Geometric Partitions + + + + + + + +

+Appendix: Visualization of Geometric Partitions +

+ +Graphical images of partitioned meshes can help you to understand the +geometric partitioning algorithms of Zoltan and to debug new or existing +algorithms. The following sections describe methods for visualizing the +partitions computed by the test drivers. + +

+2D problems with gnuplot +

To view the result of a 2D decomposition performed by the test driver, use the "gnuplot output" option of the test driver input file, as described in zdrive.inp. The test driver will write a file that can be loaded into gnuplot. The result for the test mesh in directory ch_hammond, partitioned into four regions with RCB, is shown below. The first picture is obtained from the input file zdrive.inp.gnuplot1 with gnuplot output = 1. The second picture is obtained from the input file zdrive.inp.gnuplot2 with gnuplot output = 2. Both pictures have been zoomed so that the xrange is [-0.2,1.2] and the yrange is [-0.3,0.35].

+[gnuplot 2D view] +

+

+[gnuplot 2D view] +

+ +

+3D problems with vtk_view +

+3D visualization requires downloading and compiling the +Visualization Toolkit (VTK) library (version +5.0 or later). You can then use the Zoltan top level makefile to build +the vtk_view application found in the util directory of +Zoltan. Build details can be found in the Config.generic file +in Utilities/Config. Note that you will have to download and +build CMake, the makefile generator +used by VTK, before you can build VTK. +

+vtk_view is a parallel MPI program. It does not need to be +run with the same number of processes with which you ran zdrive. You +can choose the number of processes based on the size of the input mesh you +will be visualizing, and the computational load of rendering it to an image +at interactive rates. +

+If you run vtk_view in the directory in which you ran +the test driver, the following will happen: +

    +
  • vtk_view will read zdrive.inp, or another input parameter file +if you specify a different file on the command line. +
  • It will read in the same input Chaco or Exodus II mesh that the test driver +read in. +
  • It will read in the file_name.out.p.n files that the +test driver wrote listing the part assigned to every global ID. +
  • It will open a window on your display, showing the input mesh. For +Chaco files, the mesh vertices will be colored by the part into which +Zoltan placed them. For Exodus II files, the mesh elements will be so colored. +A scalar bar in the window indicates the mapping from colors to part +numbers. A caption describes the input file name, the decomposition +method, the Zoltan parameter settings, and so on. You can use your mouse +to rotate the volume, pan and zoom in and out. +
+

+The example below shows how vtk_view displays the mesh in the test directory +ch_brack2_3 after it has been partitioned with HSFC across 5 processes. +

+[vtk_view 3D view] +

+If no test driver output files are found, vtk_view will +display the mesh without part IDs. +

+There are a few additional options that can be added to the test driver +input file, that are specifically for vtk_view. +

+ + + + + + + + + + + + +
zdrive count = <number>the number of file_name.out.p.n files, also the value of p
image height = <number>number of pixels in height of image (default is 300)
image width = <number>number of pixels in width of image (300)
omit caption = <1 or 0>do not print default caption in window if "1" (0)
omit scalar bar = <1 or 0>do not print scalar bar in window if "1" (0)
add caption = <text of caption>display indicated text in the window (no caption)
+

+The zdrive count option may be required if you have more than one set of test driver output files in the directory. Otherwise, vtk_view will look for files of the form file_name.out.p.n for any value p. Note that since the window may be resized with the mouse, you may not need image height and image width unless you must have a very specific window size. Also note that if you ran the Fortran test driver zfdrive, you will need to rename the output files from file_name.fout.p.n to file_name.out.p.n.

+Off-screen rendering with vtk_write +

+In some situations it is not possible or not convenient to open a window +on a display. In that case, you can compile util/vtk_view.cpp with the flag +OUTPUT_TO_FILE and it will create a program that renders the image +to a file instead of opening a window on a display. (The Zoltan top level makefile +does exactly this when you use the vtk_write target.) +

+Note that while +vtk_view is built with OpenGL and VTK, +vtk_write must be built +with Mesa GL and a version of the +VTK libraries that you have compiled with +special Mesa flags and with the Mesa header files. This is because +OpenGL implementations are not +in general capable of off-screen rendering, and Mesa GL is. The +Config.generic file in Utilities/Config describes in +detail how to build Mesa and then VTK for off-screen rendering. +

+vtk_write goes through the same steps that vtk_view does, +except at the end it writes one or more image files instead of opening +a window on your display. The images begin with a camera focused on the +mesh, pointing in the direction of the negative Z-axis. The positive +Y-axis is the "up" direction, and we use a right-handed coordinate +system. (So the X-axis is pointing to the right.) The camera can +revolve around the mesh in 1 degree increments. +

+The zdrive count, image width, and image height +options listed above also apply to vtk_write. In addition, you +can use these options to govern the output images. + +

+ + + + + + + + + + + + +
output format = <format name>choices are tiff, png, jpeg, ps and bmp (default is tiff)
output name = <file name>base name of image file or files (outfile)
output frame start = <number>first frame, between 0 and 360 (0)
output frame stop = <number>last frame, between 0 and 360 (0)
output frame stride = <number>the difference in degrees from one frame to the next (1)
output view up = <x y z>the direction of "up" as camera points at mesh (0 1 0)
+ +

+Other file formats +

+vtk_view was written to post-process zdrive runs, so it +only reads Chaco or Exodus II/Nemesis meshes. +If you are working with a different mesh-based file format, it +is still possible that you could use vtk_view or vtk_write +to view the parts assigned to your mesh by some application using the +Zoltan library. VTK +at this point in time has readers for many different file formats. If VTK +has a reader for your format, then +modify the read_mesh function in util/vtk_view.cpp to use that +reader. +

+You can then hard-code vtk_view to read your file, or you can +modify read_cmd_file in driver/dr_input.c to accept +a specification of your file type in addition to Chaco and Nemesis. If +you do the latter you can create a zdrive-style +input file in which to specify your file name and other visualization +parameters. +

+Finally, you need to create text files listing each global ID you supplied +to Zoltan, followed by the part ID assigned by Zoltan, with only +one global ID/part ID pair per line. Name this +file or files using the conventions used by the +test drivers. + +


+
[Table of Contents  |  Next:  +Using the Test Script  |  Previous:  +Using the Test Drivers  |  Privacy and Security] + + +# +# This line is used to specify the algorithm that Zoltan will use +# for load balancing. Currently, the following methods that are acceptable: +# rcb - Reverse Coordinate Bisection +# octpart - Octree/Space Filling Curve +# parmetis - ParMETIS graph partitioning +# reftree - Refinement tree partitioning +# +#----------------------------------------------------------------------------- +Decomposition Method = rcb + +#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# Zoltan Parameters = +# +# This line is OPTIONAL. If it is not included, no user-defined parameters +# will be passed to Zoltan. +# +# This line is used to to specify parameter values to overwrite the default +# parameter values used in Zoltan. These parameters will be passed to Zoltan +# through calls to Zoltan_Set_Param(). Parameters are set by entries consisting +# of pairs of strings "=". +# The should be a string that is recognized by the +# particular load-balancing method being used. +# The parameter entries should be separated by commas. +# When many parameters must be specified, multiple +# "Zoltan Parameters" lines may be included in the input file. +# NOTE: The Fortran90 driver zfdrive can read only one parameter per line. +#----------------------------------------------------------------------------- +Zoltan Parameters = DEBUG_LEVEL=3 +Zoltan Parameters = RCB_REUSE=0 + +#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# File Type = <,chaco or Matrix Market options> +# +# This line is OPTIONAL. If it is not included, then it is assumed that +# the file type is parallel nemesis. +# +# This line indicates which format the file is in. The current +# file types for this line are: +# NemesisI - parallel ExodusII/NemesisI files (1 per processor) +# Chaco - Chaco graph and/or geometry file(s) +# hypergraph - format documented in driver/dr_hg_readfile.c, +# suffix .hg +# matrixmarket - Matrix Market exchange format, suffix .mtx +# matrixmarket+ - our enhanced Matrix Market format, documented in +# driver/dr_hg_io.c, includes vertex and edge weights, +# and process ownership of matrix data for +# a distributed matrix, suffix .mtxp +# +# For NemesisI input, the initial distribution of data is given in the +# Nemesis files. For Chaco input, however, an initial decomposition is +# imposed by the zdrive. Four initial distribution methods are provided. +# The method to be used can be specified in the chaco options: +# initial distribution =