diff --git a/src/LowTasks.c b/src/LowTasks.c index a84f3d0..ea9840d 100644 --- a/src/LowTasks.c +++ b/src/LowTasks.c @@ -22,7 +22,7 @@ int PrimitiveVariables () { var |= VY; #endif #ifdef Z - var |= VZ; + var |= VZ; #endif #ifdef MHD @@ -116,7 +116,7 @@ void MakeDir (char *string) { /* If all processes see the same partition, only the first process will create the directory. Alternatively, they will create as many directories as necessary. For instance, if we have say 4 PEs per node - and each node sees its own scratchdir, nbprocesses/4 + and each node sees its own scratchdir, nbprocesses/4 mkdir() commands will be issued */ if (CPU_Rank) MPI_Recv (&foo, 1, MPI_INT, CPU_Rank-1, 53, MPI_COMM_WORLD, &fargostat); dir = opendir (string); @@ -199,7 +199,7 @@ void InitSpace() { real dx,dy, dz; real x0; int i,j,k; - + FILE *domain; char domain_out[512]; real ymin, zmin, xmin; @@ -212,7 +212,7 @@ void InitSpace() { int init = 0; int j_global, j_local; - + if (*SPACING=='F') { //Fixed spacing #ifdef X masterprint("Warning: zone spacing will be taken from the files domain_i.dat.\n"); @@ -232,7 +232,7 @@ void InitSpace() { already_x = YES; } fclose(domain); -#endif +#endif #ifdef Y sprintf(domain_out, "%s%s", OUTPUTDIR, "domain_y.dat"); domain = fopen(domain_out, "r"); @@ -250,7 +250,7 @@ void InitSpace() { } already_y = YES; } - + fclose(domain); #endif #ifdef Z @@ -297,11 +297,11 @@ void InitSpace() { } // Fill ghost zones using the fact that du is constant and we bisect in reverse order if (J == 0) { - for (j = 0; j < NGHY; j++) + for (j = 0; j < NGHY; j++) Ymin(NGHY - (j+1)) = bisect(0.5*YMIN, Ymin(NGHY-j), NY+1, uy, 1); } if (J == Ncpu_x - 1) { - for (j = 0; j < NGHY; j++) + for (j = 0; j < NGHY; j++) Ymin(Ny+NGHY+j+1) = bisect(Ymin(Ny+NGHY+j), 1.5*YMAX, NY+1, uy, 0); } @@ -314,7 +314,7 @@ void InitSpace() { } #endif //Z } - + else { @@ -333,7 +333,7 @@ void InitSpace() { #else dz = 0; #endif - + if (((toupper(*SPACING)) == 'L') && ((toupper(*(SPACING+1))) == 'O')) { //Logarithmic masterprint("Warning: The Y spacing is logarithmic.\n"); dy = (log(YMAX)-log(YMIN))/NY; @@ -363,7 +363,7 @@ void InitSpace() { Xmin(i) = XMIN + dx*(i-NGHX); #else Xmin(i) = 0.0; -#endif +#endif } } @@ -381,10 +381,11 @@ void InitSpace() { InvDiffXmed(i) = 1./(Xmed(i)-Xmed(i-1)); } InvDiffXmed(0) = 1./( Xmed(0)- (Xmed(Nx-1)-(XMAX-XMIN) )); - + MPI_Barrier(MPI_COMM_WORLD); - + +#ifndef HDF5 if (!already_x) { if(CPU_Master) { sprintf(domain_out, "%s%s", OUTPUTDIR, "domain_x.dat"); @@ -395,17 +396,17 @@ void InitSpace() { } } } - + if (!already_y) { if (CPU_Rank > 0) { // Force sequential write MPI_Recv (&relay, 1, MPI_INT, CPU_Rank-1, 42, MPI_COMM_WORLD, MPI_STATUS_IGNORE); } - + sprintf(domain_out, "%s%s", OUTPUTDIR, "domain_y.dat"); if(CPU_Master) { domain = fopen(domain_out, "w"); jmin = 0; - jmax = Ny+NGHY+1; + jmax = Ny+NGHY+1; } else { if (CPU_Rank < Ncpu_x) { @@ -428,9 +429,9 @@ void InitSpace() { } MPI_Barrier (MPI_COMM_WORLD); - + if (!already_z) { - if (CPU_Rank > 0) { // Force sequential read + if (CPU_Rank > 0) { // Force sequential write MPI_Recv (&relay, 1, MPI_INT, CPU_Rank-1, 43, MPI_COMM_WORLD, MPI_STATUS_IGNORE); } sprintf(domain_out, "%s%s", OUTPUTDIR, "domain_z.dat"); @@ -438,7 +439,7 @@ void InitSpace() { domain = fopen(domain_out, "w"); jmin = 0; jmax = Nz+NGHZ+1; - } + } else { if (J == 0) { domain = fopen(domain_out, "a"); @@ -454,12 +455,13 @@ void InitSpace() { } fclose(domain); } - if (CPU_Rank < CPU_Number-1) { // Force sequential read + if (CPU_Rank < CPU_Number-1) { // Force sequential write 
MPI_Send (&relay, 1, MPI_INT, CPU_Rank+1, 43, MPI_COMM_WORLD); } - + MPI_Barrier (MPI_COMM_WORLD); } +#endif } @@ -494,7 +496,7 @@ for(i=1;iowner) = Emfx; *(Emfy->owner) = Emfy; *(Emfz->owner) = Emfz; - - Divergence = CreateField("divb", 0, 0,0,0); + + Divergence = CreateField("divb", 0, 0,0,0); #endif @@ -712,7 +714,7 @@ real ComputeMass() { real totalmass; real *rho; - + INPUT (Density); rho = Density->field_cpu; @@ -823,7 +825,7 @@ int RestartSimulation(int n) { __Restart = RestartVTK; else __Restart = RestartDat; - + if (Dat2vtk) { Merge = YES; __Restart = RestartDat; @@ -849,11 +851,11 @@ int RestartSimulation(int n) { __Restart(Bz, n); #endif #endif - + #ifdef MPIIO MPI_Offset offset; offset = 0; //We start at the begining of the file - + offset = ParallelIO(Density, n, MPI_MODE_RDONLY, offset,FALSE); if(Fluidtype != DUST) offset = ParallelIO(Energy, n, MPI_MODE_RDONLY, offset,FALSE); #ifdef X @@ -869,11 +871,11 @@ int RestartSimulation(int n) { if(Fluidtype == GAS){ offset = ParallelIO(Bx, n, MPI_MODE_RDONLY, offset,FALSE); offset = ParallelIO(By, n, MPI_MODE_RDONLY, offset,FALSE); - offset = ParallelIO(Bz, n, MPI_MODE_RDONLY, offset,FALSE); + offset = ParallelIO(Bz, n, MPI_MODE_RDONLY, offset,FALSE); } #endif #endif - + begin = n*NINTERM; if (PostRestart) PostRestartHook (); @@ -903,9 +905,9 @@ void RestartVTK(Field *f, int n) { masterprint("Error reading %s\n", filename); exit(1); } - + masterprint("Reading %s\n", filename); - + while(1) { temp = fscanf(fi, "%s\n", line); if (strcmp(line,"LOOKUP_TABLE") == 0){ @@ -913,7 +915,7 @@ void RestartVTK(Field *f, int n) { break; } } - + i = j = k = 0; #ifndef SPHERICAL @@ -945,9 +947,9 @@ void RestartVTK(Field *f, int n) { masterprint("Error reading %s\n", filename); exit(1); } - + masterprint("Reading %s\n", filename); - + while(1) { temp = fscanf(fi, "%s\n", line); if (strcmp(line,"LOOKUP_TABLE") == 0){ @@ -956,7 +958,7 @@ void RestartVTK(Field *f, int n) { break; } } - + i = j = k = 0; origin = Y0+Z0*NX*NY; @@ -1009,7 +1011,7 @@ void RestartDat(Field *field, int n) { exit(1); } masterprint("Reading %s\n", filename); - + for (k=NGHZ; kfield_cpu; real* vx0 = Vx0->field_cpu; @@ -72,7 +72,7 @@ void _init_stockholm() { #endif real* rho = Density->field_cpu; real* rho0 = Density0->field_cpu; - + #ifdef Z for (k=0; kname); - Write2D(Density0, outputname, OUTPUTDIR, GHOSTINC); +#ifdef HDF5 + WriteOutputs2dHdf5(); +#else + sprintf(outputname,"%s0_2d.dat",Density->name); + Write2D(Density0, outputname, OUTPUTDIR, GHOSTINC); #ifdef X - sprintf(outputname,"%s0_2d.dat",Vx->name); - Write2D(Vx0, outputname, OUTPUTDIR, GHOSTINC); + sprintf(outputname,"%s0_2d.dat",Vx->name); + Write2D(Vx0, outputname, OUTPUTDIR, GHOSTINC); #endif #ifdef Y - sprintf(outputname,"%s0_2d.dat",Vy->name); - Write2D(Vy0, outputname, OUTPUTDIR, GHOSTINC); + sprintf(outputname,"%s0_2d.dat",Vy->name); + Write2D(Vy0, outputname, OUTPUTDIR, GHOSTINC); #endif #ifdef Z - sprintf(outputname,"%s0_2d.dat",Vz->name); - Write2D(Vz0, outputname, OUTPUTDIR, GHOSTINC); + sprintf(outputname,"%s0_2d.dat",Vz->name); + Write2D(Vz0, outputname, OUTPUTDIR, GHOSTINC); #endif #ifdef ADIABATIC - sprintf(outputname,"%s0_2d.dat",Energy->name); - Write2D(Energy0, outputname, OUTPUTDIR, GHOSTINC); + sprintf(outputname,"%s0_2d.dat",Energy->name); + Write2D(Energy0, outputname, OUTPUTDIR, GHOSTINC); +#endif #endif } @@ -130,7 +134,7 @@ void init_stockholm() { static boolean init = TRUE; if (init) MULTIFLUID(_init_stockholm()); - + init = FALSE; } diff --git a/src/main.c b/src/main.c index 
e06ce00..da5e287 100644 --- a/src/main.c +++ b/src/main.c @@ -1,6 +1,6 @@ /** \file main.c -Main file of the distribution. +Main file of the distribution. Manages the call to initialization functions, then the main loop. @@ -13,7 +13,7 @@ real dt; real dtemp = 0.0; int main(int argc, char *argv[]) { - + int i=0, OutputNumber = 0, d; char sepline[]="==========================="; sprintf (FirstCommand, "%s", argv[0]); @@ -122,7 +122,7 @@ int main(int argc, char *argv[]) { NbRestart = atoi(argv[i+1]); if ((NbRestart < 0)) { masterprint ("Incorrect output number\n"); - PrintUsage (argv[0]); + PrintUsage (argv[0]); } } if (strchr (argv[i], 'B')) { @@ -132,7 +132,7 @@ int main(int argc, char *argv[]) { NbRestart = atoi(argv[i+1]); if ((NbRestart < 0)) { masterprint ("Incorrect output number\n"); - PrintUsage (argv[0]); + PrintUsage (argv[0]); } } if (strchr (argv[i], 'D')) { @@ -151,7 +151,7 @@ int main(int argc, char *argv[]) { prs_exit (1); } #endif - + #ifdef MPICUDA EarlyDeviceSelection(); @@ -167,7 +167,7 @@ int main(int argc, char *argv[]) { sprintf (VersionString, "FARGO3D git version %s", xstr(VERSION)); masterprint("\n\n%s\n%s\nSETUP: '%s'\n%s\n\n", sepline, VersionString, xstr(SETUPNAME), sepline); - + if ((ParameterFile[0] == 0) || (argc == 1)) PrintUsage (argv[0]); #ifndef MPICUDA @@ -223,14 +223,18 @@ int main(int argc, char *argv[]) { prs_exit(EXIT_FAILURE); } #endif - + +#ifndef HDF5 ListVariables ("variables.par"); //Writes all variables defined in set up ListVariablesIDL ("IDL.var"); +#endif ChangeArch(); /*Changes the name of the main functions ChangeArch adds _cpu or _gpu if GPU is activated.*/ split(&Gridd); /*Split mesh over PEs*/ InitSpace(); +#ifndef HDF5 WriteDim(); +#endif InitSurfaces(); LightGlobalDev(); /* Copy light arrays to the device global memory */ CreateFields(); // Allocate all fields. @@ -245,14 +249,14 @@ the target velocity in Stockholm's damping prescription. We copy the value above *after* rescaling, and after any initial correction to OMEGAFRAME (which is used afterwards to build the initial Vx field. */ - + if(Restart == YES || Restart_Full == YES) { CondInit (); //Needed even for restarts: some setups have custom //definitions (eg potential for setup MRI) or custom //scaling laws (eg. setup planetesimalsRT). MULTIFLUID( begin_i = RestartSimulation(NbRestart)); - + if (ThereArePlanets) { PhysicalTime = GetfromPlanetFile (NbRestart, 9, 0); OMEGAFRAME = GetfromPlanetFile (NbRestart, 10, 0); @@ -260,8 +264,10 @@ OMEGAFRAME (which is used afterwards to build the initial Vx field. */ } } else { +#ifndef HDF5 if (ThereArePlanets) EmptyPlanetSystemFiles (); +#endif CondInit(); // Initialize set up // Note: CondInit () must be called only ONCE (otherwise some // custom scaling laws may be applied several times). @@ -270,7 +276,7 @@ OMEGAFRAME (which is used afterwards to build the initial Vx field. */ if (StretchOldOutput == YES) { StretchOutput (StretchNumber); } - + MULTIFLUID(comm(ENERGY)); //Very important for isothermal cases! 
/* This must be placed ***after*** reading the input files in case of a restart */ @@ -300,6 +306,28 @@ if (*SPACING=='N'){ prs_exit (1); #endif +#ifdef HDF5 + if (SetupOutputHdf5() != 0) { + mastererr("HDF5 output initialization failed!\n"); + exit(EXIT_FAILURE); + } + + if (WriteDomainHdf5() != 0) { + mastererr("HDF5 domain write failed!\n"); + exit(EXIT_FAILURE); + } + + if (WriteParametersHdf5() != 0) { + mastererr("HDF5 parameters write failed!\n"); + exit(EXIT_FAILURE); + } + + if (ThereArePlanets && (WritePlanetsHdf5() != 0)) { + mastererr("HDF5 planets write failed!\n"); + exit(EXIT_FAILURE); + } +#endif + GetHostsList (); DumpToFargo3drc(argc, argv); @@ -307,46 +335,54 @@ if (*SPACING=='N'){ #ifdef LONGSUMMARY ExtractFromExecutable (NO, ArchFile, 2); #endif - + MULTIFLUID(FillGhosts(PrimitiveVariables())); - -#ifdef STOCKHOLM + +#ifdef STOCKHOLM FARGO_SAFE(init_stockholm()); //ALREADY IMPLEMENTED MULTIFLUID COMPATIBILITY #endif - + #ifdef GHOSTSX masterprint ("\n\nNew version with ghost zones in X activated\n"); #else masterprint ("Standard version with no ghost zones in X\n"); #endif - + for (i = begin_i; i<=NTOT; i++) { // MAIN LOOP if (NINTERM * (TimeStep = (i / NINTERM)) == i) { #if defined(MHD) && defined(DEBUG) FARGO_SAFE(ComputeDivergence(Bx, By, Bz)); #endif +#ifndef HDF5 if (ThereArePlanets) - WritePlanetSystemFile(TimeStep, NO); - + WritePlanetSystemFile(TimeStep, NO); +#endif + #ifndef NOOUTPUTS - MULTIFLUID(WriteOutputs(ALL)); - +#ifdef HDF5 + WriteOutputsHdf5(); +#else + MULTIFLUID(WriteOutputs(ALL)); +#endif + #ifdef MATPLOTLIB Display(); #endif - + if(CPU_Master) printf("OUTPUTS %d at date t = %f OK\n", TimeStep, PhysicalTime); #endif - + if (TimeInfo == YES) GiveTimeInfo (TimeStep); } - + if (NSNAP != 0) { if (NSNAP * (TimeStep = (i / NSNAP)) == i) { - MULTIFLUID(WriteOutputs(SPECIFIC)); +#ifndef HDF5 + MULTIFLUID(WriteOutputs(SPECIFIC)); +#endif #ifdef MATPLOTLIB Display(); #endif @@ -355,11 +391,11 @@ if (*SPACING=='N'){ if (i==NTOT) break; - + dtemp = 0.0; - + while (dtempDT) dt = DT - (dtemp-dt); //updating dt //------------------------------------------------------------------------ - + //------------------------------------------------------------------------ /* We now compute the total density of the mesh. We need first reset an array and then fill it by adding the density of each fluid */ - FARGO_SAFE(Reset_field(Total_Density)); - MULTIFLUID(ComputeTotalDensity()); + FARGO_SAFE(Reset_field(Total_Density)); + MULTIFLUID(ComputeTotalDensity()); //------------------------------------------------------------------------ - + #ifdef COLLISIONPREDICTOR FARGO_SAFE(Collisions(0.5*dt, 0)); // 0 --> V is used and we update v_half. 
#endif - + MULTIFLUID(Sources(dt)); //v_half is used in the R.H.S #ifdef DRAGFORCE @@ -417,7 +453,7 @@ if (*SPACING=='N'){ #ifdef DUSTDIFFUSION FARGO_SAFE(DustDiffusion_Main(dt)); #endif - + MULTIFLUID(Transport(dt)); PhysicalTime+=dt; @@ -446,24 +482,33 @@ if (*SPACING=='N'){ FullArrayComms = 0; ContourComms = 0; } - + if(CPU_Master) printf("%s", "\n"); - + +#ifndef HDF5 MULTIFLUID(MonitorGlobal (MONITOR2D | \ - MONITORY | \ - MONITORY_RAW | \ - MONITORSCALAR | \ - MONITORZ | \ - MONITORZ_RAW)); + MONITORY | \ + MONITORY_RAW | \ + MONITORSCALAR | \ + MONITORZ | \ + MONITORZ_RAW)); +#endif if (ThereArePlanets) { +#ifdef HDF5 + WritePlanetsHdf5(); +#else WritePlanetSystemFile(TimeStep, YES); SolveOrbits (Sys); +#endif } } - + +#ifdef HDF5 + TeardownOutputHdf5(); +#endif MPI_Finalize(); - + masterprint("End of the simulation!\n"); - return 0; + return 0; } diff --git a/src/makefile b/src/makefile index 8977418..d1a17c0 100644 --- a/src/makefile +++ b/src/makefile @@ -126,15 +126,15 @@ endif #--------------------------------------------------------------------- #LINUX PLATFORM (GENERIC, default) #FARGO_ARCH must be set to LINUX -CC_LINUX = gcc +CC_LINUX = gcc SEQOPT_LINUX = -O3 PARAOPT_LINUX = ${SEQOPT_LINUX} PARACC_LINUX = mpicc LIBS_LINUX = -lm -INC_LINUX = +INC_LINUX = NVCC_LINUX = nvcc CUDAOPT_LINUX = -O3 -w #-arch=sm_XX # try utils/cuda/get_cuda_sm.sh -PARAINC_LINUX = +PARAINC_LINUX = PARALIB_LINUX = CUDAINC_LINUX = -I${CUDA}/include/ CUDALIB_LINUX = -L${CUDA}/lib64/ -lcudart -lstdc++ @@ -143,16 +143,16 @@ CXXPARA_LINUX = mpic++ -stdlib=libstdc++ #--------------------------------------------------------------------- #KEPLER PLATFORM WITH OPENMPI-1.7.4 AT UNAM #FARGO_ARCH must be set to KEPLEROPENMPI -CC_KEPLEROPENMPI = gcc +CC_KEPLEROPENMPI = gcc SEQOPT_KEPLEROPENMPI = -O3 -ffast-math PARAOPT_KEPLEROPENMPI = ${SEQOPT_KEPLEROPENMPI} PARACC_KEPLEROPENMPI = mpicc LIBS_KEPLEROPENMPI = -lm ENVRANK_KEPLEROPENMPI = OMPI_COMM_WORLD_LOCAL_RANK -INC_KEPLEROPENMPI = +INC_KEPLEROPENMPI = NVCC_KEPLEROPENMPI = nvcc CUDAOPT_KEPLEROPENMPI = -O3 -arch=sm_35 -PARAINC_KEPLEROPENMPI = +PARAINC_KEPLEROPENMPI = PARALIB_KEPLEROPENMPI = CUDAINC_KEPLEROPENMPI = -I${CUDA}/include/ CUDALIB_KEPLEROPENMPI = -L${CUDA}/lib64/ -lcudart @@ -161,17 +161,17 @@ CXXPARA_KEPLEROPENMPI = mpic++ -stdlib=libstdc++ #--------------------------------------------------------------------- #KEPLER PLATFORM WITH MVAPICH2-2.0b AT UNAM #FARGO_ARCH must be set to KEPLERMVAPICH -CC_KEPLERMVAPICH = gcc +CC_KEPLERMVAPICH = gcc SEQOPT_KEPLERMVAPICH = -O3 -ffast-math PARAOPT_KEPLERMVAPICH = ${SEQOPT_KEPLERMVAPICH} PARACC_KEPLERMVAPICH = mpicc LIBS_KEPLERMVAPICH = -lm ENVRANK_KEPLERMVAPICH = MV2_COMM_WORLD_LOCAL_RANK -INC_KEPLERMVAPICH = +INC_KEPLERMVAPICH = NVCC_KEPLERMVAPICH = nvcc CUDAOPT_KEPLERMVAPICH = -O3 -arch=sm_35 -PARAINC_KEPLERMVAPICH = -PARALIB_KEPLERMVAPICH = +PARAINC_KEPLERMVAPICH = +PARALIB_KEPLERMVAPICH = CUDAINC_KEPLERMVAPICH = -I${CUDA}/include/ CUDALIB_KEPLERMVAPICH = -L${CUDA}/lib64/ -lcudart -L/share/apps/mvapich2/lib CXX_KEPLERMVAPICH = g++ -stdlib=libstdc++ @@ -179,16 +179,16 @@ CXXPARA_KEPLERMVAPICH = mpic++ -stdlib=libstdc++ #--------------------------------------------------------------------- #TESLA PLATFORM WITH OPENMPI-1.7.2 AT UNAM #FARGO_ARCH must be set to TESLAOPENMPI -CC_TESLAOPENMPI = gcc +CC_TESLAOPENMPI = gcc SEQOPT_TESLAOPENMPI = -O3 -ffast-math PARAOPT_TESLAOPENMPI = ${SEQOPT_TESLAOPENMPI} PARACC_TESLAOPENMPI = mpicc LIBS_TESLAOPENMPI = -lm ENVRANK_TESLAOPENMPI = OMPI_COMM_WORLD_LOCAL_RANK 
-INC_TESLAOPENMPI = +INC_TESLAOPENMPI = NVCC_TESLAOPENMPI = nvcc CUDAOPT_TESLAOPENMPI = -O3 -arch=sm_20 -PARAINC_TESLAOPENMPI = +PARAINC_TESLAOPENMPI = PARALIB_TESLAOPENMPI = CUDAINC_TESLAOPENMPI = -I${CUDA}/include/ CUDALIB_TESLAOPENMPI = -L${CUDA}/lib64/ -lcudart @@ -197,16 +197,16 @@ CXXPARA_TESLAOPENMPI = mpic++ #--------------------------------------------------------------------- #TESLA PLATFORM WITH MVAPICH2-1.9 AT UNAM #FARGO_ARCH must be set to TESLAMVAPICH -CC_TESLAMVAPICH = gcc +CC_TESLAMVAPICH = gcc SEQOPT_TESLAMVAPICH = -O3 -ffast-math PARAOPT_TESLAMVAPICH = ${SEQOPT_TESLAMVAPICH} PARACC_TESLAMVAPICH = mpicc LIBS_TESLAMVAPICH = -lm ENVRANK_TESLAMVAPICH = MV2_COMM_WORLD_LOCAL_RANK -INC_TESLAMVAPICH = +INC_TESLAMVAPICH = NVCC_TESLAMVAPICH = nvcc CUDAOPT_TESLAMVAPICH = -O3 -arch=sm_20 -PARAINC_TESLAMVAPICH = +PARAINC_TESLAMVAPICH = PARALIB_TESLAMVAPICH = -lmpich -lOpenCL CUDAINC_TESLAMVAPICH = -I${CUDA}/include/ CUDALIB_TESLAMVAPICH = -lcudart @@ -215,16 +215,16 @@ CXXPARA_TESLAMVAPICH = mpic++ -stdlib=libstdc++ #--------------------------------------------------------------------- #TESLA C2070 PLATFORM WITH MVAPICH2-1.9 AT CORDOBA #FARGO_ARCH must be set to MENDIETA -CC_MENDIETA = gcc +CC_MENDIETA = gcc SEQOPT_MENDIETA = -O3 -ffast-math PARAOPT_MENDIETA = ${SEQOPT_MENDIETA} PARACC_MENDIETA = mpicc LIBS_MENDIETA = -lm ENVRANK_MENDIETA = MV2_COMM_WORLD_RANK -INC_MENDIETA = +INC_MENDIETA = NVCC_MENDIETA = nvcc CUDAOPT_MENDIETA = -O3 -arch=sm_20 -PARAINC_MENDIETA = +PARAINC_MENDIETA = PARALIB_MENDIETA = -lmpich -lOpenCL CUDAINC_MENDIETA = -I${CUDA}/include/ CUDALIB_MENDIETA = -lcudart @@ -234,13 +234,13 @@ CXXPARA_MENDIETA = mpic++ -stdlib=libstdc++ #QUADRO PLATFORM (IATE COMPUTER) #FARGO_ARCH must be set to QUADRO CC_QUADRO = gcc -SEQOPT_QUADRO = -O3 #-ffast-math +SEQOPT_QUADRO = -O3 #-ffast-math PARAOPT_QUADRO = ${SEQOPT_QUADRO} PARACC_QUADRO = mpicc LIBS_QUADRO = -lm -INC_QUADRO = +INC_QUADRO = NVCC_QUADRO = ${CUDA}/bin/nvcc -arch=sm_20 -CUDAOPT_QUADRO = +CUDAOPT_QUADRO = PARAINC_QUADRO = -I/usr/include/openmpi-x86_64/ PARALIB_QUADRO = -L/usr/lib64/openmpi/lib/ CUDALIB_QUADRO = -L${CUDA}/lib64/ -lcudart @@ -256,12 +256,12 @@ SEQOPT_MacIntel = -O3 -arch x86_64 -Wmissing-prototypes #-Wall -Wextra PARAOPT_MacIntel = ${SEQOPT_MacIntel} PARACC_MacIntel = mpicc ENVRANK_MacIntel = OMPI_COMM_WORLD_RANK -LIBS_MacIntel = -INC_MacIntel = +LIBS_MacIntel = +INC_MacIntel = NVCC_MacIntel = ${CUDA}/bin/nvcc CUDAOPT_MacIntel = -arch=sm_30 -m64 -O3 --compiler-options '-Wno-return-type-c-linkage' -Xcudafe --diag_suppress=set_but_not_used -Xcudafe --diag_suppress=declared_but_not_referenced -PARAINC_MacIntel = -PARALIB_MacIntel = +PARAINC_MacIntel = +PARALIB_MacIntel = CUDALIB_MacIntel = -L${CUDA}/lib/ -lcudart CUDAINC_MacIntel = -I${CUDA}/include/ CXX_MacIntel = g++ -stdlib=libstdc++ @@ -369,7 +369,7 @@ MAINOBJ = LowTasks.o psys.o main.o param.o var.o usage.o \ mesh.o ram_advect.o ram_advect_lin.o ram_compute_ustar.o ram_plm.o #-----------------------#Cuda objects--------------------------------- GPU_OBJ = reduction_sum_device.o reduction_min_device.o \ - lowtasks_gpu.o + lowtasks_gpu.o # #Do not remove!!! @@ -457,6 +457,12 @@ else MAINOBJ += ${MPIDUMMY} COMPILER = ${CC} endif + +ifeq (-DHDF5,${findstring -DHDF5,${FARGO_OPT}}) + LIBS += -lhdf5 + MAINOBJ += output_hdf5.o +endif + LINKER = ${COMPILER} @@ -469,7 +475,7 @@ endif #--------------------------------------------------------------------- #Adding includes and objects if MATPLOTLIB is activated. 
-#These lines prevent compatibility problems +#These lines prevent compatibility problems #if matplotlib is not installed ifeq (MATPLOTLIB,${findstring MATPLOTLIB,${FARGO_DISPLAY}}) FARGO_OPT += -DMATPLOTLIB @@ -477,7 +483,7 @@ ifeq (MATPLOTLIB,${findstring MATPLOTLIB,${FARGO_DISPLAY}}) OBJECTS += matplotlib.o LIBS += -lpython3.8 endif -# Debug mode if FARGO_DEBUG = 1, Note that this mode is only +# Debug mode if FARGO_DEBUG = 1, Note that this mode is only # meant to execute gdb (or lldb, etc.) or valgrind. ifeq (${FARGO_DEBUG}, 1) OPTIONS = -g @@ -514,7 +520,7 @@ ifeq (${FARGO_GPU}, 1) @echo "a CPU with a GPU card (1 GPU only)." endif endif -ifeq (MATPLOTLIB,${findstring MATPLOTLIB,${FARGO_DISPLAY}}) +ifeq (MATPLOTLIB,${findstring MATPLOTLIB,${FARGO_DISPLAY}}) @echo "" @echo "This built has a graphical output," @echo "which uses Python's matplotlib library." @@ -635,7 +641,7 @@ collisions.cu: collisions_template.cu ${GLOBAL} %.cu: %.c @echo C2CUDA $< "==>" $*.cu - @${PYTHON} ${SCRIPTSDIR}/c2cuda.py -i $< -o $*.cu -s ${SETUP} + @${PYTHON} ${SCRIPTSDIR}/c2cuda.py -i $< -o $*.cu -s ${SETUP} reduction_sum_device.o: reduction_sum_device.cu @echo NVCC $< "==>" $*.o diff --git a/src/mpi_dummy.c b/src/mpi_dummy.c index 716877d..474f42b 100644 --- a/src/mpi_dummy.c +++ b/src/mpi_dummy.c @@ -15,6 +15,8 @@ void MPI_Comm_rank (int a, int *b) {*b = 0;} /* Only one process, with rank zero void MPI_Comm_size (int a, int *b) {*b = 1;} /* Only one process in the world communicator... */ +void MPI_Comm_dup(MPI_Comm a, MPI_Comm *b) { *b = a; } + void MPI_Init (int *argc, char **argv[]) { fprintf (stderr, "\n !!!! WARNING !!!!\n\n"); fprintf (stderr, "This is a sequential built of the %s code\n", *argv[0]); @@ -88,4 +90,6 @@ void MPI_Recv(){} void MPI_Barrier(){} void MPI_Wait(){} void MPI_Scan(){} //In place scans require no special action +void MPI_Exscan(){} void MPI_Comm_split(){} +void MPI_Comm_free(MPI_Comm *a){} diff --git a/src/mpi_dummy.h b/src/mpi_dummy.h index 337456d..bde7327 100644 --- a/src/mpi_dummy.h +++ b/src/mpi_dummy.h @@ -4,6 +4,7 @@ #define MPI_FLOAT 4 #define MPI_CHAR 1 #define MPI_LONG 3 +#define MPI_UNSIGNED_LONG 5 #define MPI_INT 0 #define MPI_MIN 0 #define MPI_MAX 0 @@ -19,7 +20,10 @@ typedef long MPI_Offset; void MPI_Comm_rank(); void MPI_Barrier(); void MPI_Comm_size(); +void MPI_Comm_dup(MPI_Comm, MPI_Comm*); +void MPI_Comm_free(MPI_Comm*); void MPI_Scan(); +void MPI_Exscan(); void MPI_Comm_split(); void MPI_Init(); void MPI_Finalize(); diff --git a/src/output_hdf5.c b/src/output_hdf5.c new file mode 100644 index 0000000..a669930 --- /dev/null +++ b/src/output_hdf5.c @@ -0,0 +1,945 @@ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <hdf5.h> + +#include "fargo3d.h" + +extern int Id_Var; +extern Param Var_Set[]; + +#ifdef FLOAT +/* single-precision mode */ +#define REALTYPE (H5T_NATIVE_FLOAT) +#else +/* double-precision mode */ +#define REALTYPE (H5T_NATIVE_DOUBLE) +#endif + +#define FIELDS_PER_PLANET (8) + +static int fields_per_fluid = 2; +static int static_fields_per_fluid = 1; + +static struct { + MPI_Comm world_comm; + + hid_t file_id; /* file handle */ + hid_t xfpl_id; /* data transfer prop list */ + + struct { + hid_t group_id; + hid_t memspace_id; + hid_t **dataset_ids; /* fluid field dataset handles */ + } fluids; + +#ifdef STOCKHOLM + struct { + hid_t group_id; /* stockholm group handle */ + hid_t **dataset_ids; /* stockholm dataset handles */ + hid_t memspace_id; + } stockholm; /* handles for stockholm boundary output */ +#endif + + struct { + hid_t dtype_id; /* custom
datatype for planets */ + hid_t memspace_id; /* memory dataspace handle */ + hid_t *dataset_ids; /* planet output dataset handles */ + } planets; /* handles for planet output */ + + hsize_t global_file_dims[4]; + hsize_t global_file_maxdims[4]; + hsize_t local_mem_dims[4]; + hsize_t write_dims[4]; + hsize_t ghost_dims[4]; + hsize_t chunk_dims[4]; + + hsize_t global_file_start[4]; + hsize_t global_file_ghost_start[4]; +} hdf5; + +static int WriteFieldStatic(hid_t dset, void *buffer); +static int WriteFieldTimeDep(hid_t dset, void *buffer); + +static int WriteRealAttribute(const char *name, real val); +static int WriteIntAttribute(const char *name, int val); +static int WriteBoolAttribute(const char *name, boolean val); +static int WriteStringAttribute(const char *name, const char *val); + +int SetupOutputHdf5() { + size_t len; + char *fname; + int rank = 0; + + htri_t avail; /* flag to check for plugins */ + hid_t fapl_id; /* file access prop list */ + + hid_t fluids_dcpl_id; /* dataset creation prop list */ + hid_t fluids_group_id; + hid_t fluids_filespace_id; + hid_t *fluids_subgroup_ids; /* group handles for fluid fields */ +#ifdef STOCKHOLM + hid_t stockholm_dcpl_id; + hid_t stockholm_filespace_id; + hid_t *stockholm_subgroup_ids; /* group handles for stockholm fields */ +#endif + +#ifdef Z + fields_per_fluid += 1; + static_fields_per_fluid += 1; +#endif +#ifdef Y + fields_per_fluid += 1; + static_fields_per_fluid += 1; +#endif +#ifdef X + fields_per_fluid += 1; + static_fields_per_fluid += 1; +#endif +#ifdef ADIABATIC + static_fields_per_fluid += 1; +#endif + + MPI_Comm_dup(MPI_COMM_WORLD, &(hdf5.world_comm)); + + /* form the output filename from the directory and a user-supplied tag */ + len = strlen(OUTPUTDIR) + strlen(FILETAG) + 5; + fname = malloc(len * sizeof(char)); + snprintf(fname, len, "%s/%s.h5", OUTPUTDIR, FILETAG); + + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + hdf5.xfpl_id = H5Pcreate(H5P_DATASET_XFER); +#ifdef PARALLEL + /* if we're working in parallel, we need to tell the library; this allows + * writing to the same file from multiple processes */ + H5Pset_fapl_mpio(fapl_id, hdf5.world_comm, MPI_INFO_NULL); + H5Pset_dxpl_mpio(hdf5.xfpl_id, H5FD_MPIO_COLLECTIVE); +#endif + H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); + + /* create the file handle */ + hdf5.file_id = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + free(fname); + if (hdf5.file_id < 0) return -1; + + /* set up for fluid output */ + fluids_group_id = H5Gcreate(hdf5.file_id, + "fluids", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + if (hdf5.fluids.group_id < 0) return -1; + + /* 0. add a time dimension */ + hdf5.global_file_dims[rank] = 0; + hdf5.global_file_maxdims[rank] = H5S_UNLIMITED; + hdf5.chunk_dims[rank] = 1; + hdf5.local_mem_dims[rank] = 1; + hdf5.write_dims[rank] = 1; + hdf5.ghost_dims[rank] = 0; + rank += 1; + + /* 1. if enabled, add z dimension */ +#ifdef Z + hdf5.global_file_dims[rank] = NZ; + hdf5.write_dims[rank] = Nz; + hdf5.ghost_dims[rank] = Nz + NGHZ * (Gridd.K == 0) + + NGHZ * (Gridd.K + 1 == Gridd.NK); +#ifdef WRITEGHOSTS + hdf5.global_file_dims[rank] += 2 * NGHZ; + hdf5.write_dims[rank] += NGHZ * (Gridd.K == 0) + + NGHZ * (Gridd.K + 1 == Gridd.NK); +#endif + hdf5.global_file_maxdims[rank] = + hdf5.chunk_dims[rank] = hdf5.global_file_dims[rank]; + hdf5.local_mem_dims[rank] = Nz + 2 * NGHZ; + rank += 1; +#endif // Z + + /* 2. 
if enabled, add y dimension */ +#ifdef Y + hdf5.global_file_dims[rank] = NY; + hdf5.write_dims[rank] = Ny; + hdf5.ghost_dims[rank] = Ny + NGHY * (Gridd.J == 0) + + NGHY * (Gridd.J + 1 == Gridd.NJ); +#ifdef WRITEGHOSTS + hdf5.global_file_dims[rank] += 2 * NGHY; + hdf5.write_dims[rank] += NGHY * (Gridd.J == 0) + + NGHY * (Gridd.J + 1 == Gridd.NJ); +#endif + hdf5.global_file_maxdims[rank] = + hdf5.chunk_dims[rank] = hdf5.global_file_dims[rank]; + hdf5.local_mem_dims[rank] = Ny + 2 * NGHY; + rank += 1; +#endif // Y + + /* 3. if enabled, add x dimension; note that the x dimension isn't + * split over multiple ranks */ +#ifdef X + hdf5.global_file_dims[rank] = NX; + hdf5.write_dims[rank] = Nx; + hdf5.ghost_dims[rank] = Nx + 2 * NGHX; +#ifdef WRITEGHOSTS + hdf5.global_file_dims[rank] += 2 * NGHX; + hdf5.write_dims[rank] += 2 * NGHX; +#endif + hdf5.global_file_maxdims[rank] = + hdf5.chunk_dims[rank] = hdf5.global_file_dims[rank]; + hdf5.local_mem_dims[rank] = Nx + 2 * NGHX; + rank += 1; +#endif // X + + /* filespace expands with time, memspace does not */ + fluids_filespace_id = H5Screate_simple(rank, + hdf5.global_file_dims, hdf5.global_file_maxdims); + hdf5.fluids.memspace_id = H5Screate_simple(rank, hdf5.local_mem_dims, NULL); + +#ifdef STOCKHOLM + /* stockholm output isn't time-dependent */ + stockholm_filespace_id = H5Screate_simple(rank - 2, + hdf5.global_file_dims + 1, hdf5.global_file_maxdims + 1); + hdf5.stockholm.memspace_id = H5Screate_simple(rank - 2, + hdf5.local_mem_dims + 1, NULL); +#endif + + /* unlimited-dimension datasets **must** be chunked */ + fluids_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + H5Pset_chunk(fluids_dcpl_id, rank, hdf5.chunk_dims); + avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE); + if (avail && (COMPRESSLEVEL > 0)) { + /* conditionally enable compression */ + H5Pset_deflate(fluids_dcpl_id, COMPRESSLEVEL); + } + +#ifdef STOCKHOLM + stockholm_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + H5Pset_chunk(stockholm_dcpl_id, rank - 2, hdf5.chunk_dims + 1); + avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE); + if (avail && (COMPRESSLEVEL > 0)) { + /* conditionally enable compression */ + H5Pset_deflate(stockholm_dcpl_id, COMPRESSLEVEL); + } +#endif + + /* allocate memory to store group handles */ + fluids_subgroup_ids = malloc(NFLUIDS * sizeof(hid_t)); + if (fluids_subgroup_ids == NULL) return -1; +#ifdef STOCKHOLM + stockholm_subgroup_ids = malloc(NFLUIDS * sizeof(hid_t)); + if (stockholm_subgroup_ids == NULL) return -1; +#endif + + /* allocate memory to store dataset handles */ + hdf5.fluids.dataset_ids = malloc(NFLUIDS * sizeof(hid_t *)); + if (hdf5.fluids.dataset_ids == NULL) return -1; +#ifdef STOCKHOLM + hdf5.stockholm.dataset_ids = malloc(NFLUIDS * sizeof(hid_t *)); + if (hdf5.stockholm.dataset_ids == NULL) return -1; +#endif + + for (int f = 0; f < NFLUIDS; ++f) { + int d; /* local counter for datasets */ + hid_t gid; /* local group handle */ + hid_t subgid; /* local subgroup handle */ + + /* create a group for each fluid */ + gid = H5Gcreate(fluids_group_id, Fluids[f]->name, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + if (gid < 0) return -1; + + /* allocate memory for dataset handles */ + hdf5.fluids.dataset_ids[f] = malloc(fields_per_fluid * sizeof(hid_t)); + + d = 0; /* create a dataset for each field */ + hdf5.fluids.dataset_ids[f][d++] = H5Dcreate(gid, "dens", REALTYPE, + fluids_filespace_id, H5P_DEFAULT, fluids_dcpl_id, H5P_DEFAULT); +#ifdef Z + hdf5.fluids.dataset_ids[f][d++] = H5Dcreate(gid, "zvel", REALTYPE, + fluids_filespace_id, H5P_DEFAULT, fluids_dcpl_id, 
H5P_DEFAULT); +#endif +#ifdef Y + hdf5.fluids.dataset_ids[f][d++] = H5Dcreate(gid, "yvel", REALTYPE, + fluids_filespace_id, H5P_DEFAULT, fluids_dcpl_id, H5P_DEFAULT); +#endif +#ifdef X + hdf5.fluids.dataset_ids[f][d++] = H5Dcreate(gid, "xvel", REALTYPE, + fluids_filespace_id, H5P_DEFAULT, fluids_dcpl_id, H5P_DEFAULT); +#endif + hdf5.fluids.dataset_ids[f][d++] = H5Dcreate(gid, "ener", REALTYPE, + fluids_filespace_id, H5P_DEFAULT, fluids_dcpl_id, H5P_DEFAULT); + +#ifdef STOCKHOLM + d = 0; /* stockholm group within each fluid */ + subgid = H5Gcreate(gid, "stockholm", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + if (subgid < 0) return -1; + + /* allocate memory for dataset handles */ + hdf5.stockholm.dataset_ids[f] = malloc(static_fields_per_fluid * sizeof(hid_t)); + + hdf5.stockholm.dataset_ids[f][d++] = H5Dcreate(subgid, "dens", REALTYPE, + stockholm_filespace_id, H5P_DEFAULT, stockholm_dcpl_id, H5P_DEFAULT); +#ifdef Z + hdf5.stockholm.dataset_ids[f][d++] = H5Dcreate(subgid, "zvel", REALTYPE, + stockholm_filespace_id, H5P_DEFAULT, stockholm_dcpl_id, H5P_DEFAULT); +#endif +#ifdef Y + hdf5.stockholm.dataset_ids[f][d++] = H5Dcreate(subgid, "yvel", REALTYPE, + stockholm_filespace_id, H5P_DEFAULT, stockholm_dcpl_id, H5P_DEFAULT); +#endif +#ifdef X + hdf5.stockholm.dataset_ids[f][d++] = H5Dcreate(subgid, "xvel", REALTYPE, + stockholm_filespace_id, H5P_DEFAULT, stockholm_dcpl_id, H5P_DEFAULT); +#endif +#ifdef ADIABATIC + hdf5.stockholm.dataset_ids[f][d++] = H5Dcreate(subgid, "ener", REALTYPE, + stockholm_filespace_id, H5P_DEFAULT, stockholm_dcpl_id, H5P_DEFAULT); +#endif + + H5Gclose(subgid); /* close temporary handle */ +#endif // STOCKHOLM + + H5Gclose(gid); /* close temporary handle */ + } + + /* can close these handles, datasets are created */ + H5Pclose(fapl_id); + H5Pclose(fluids_dcpl_id); + H5Gclose(fluids_group_id); + H5Sclose(fluids_filespace_id); +#ifdef STOCKHOLM + H5Pclose(stockholm_dcpl_id); + H5Sclose(stockholm_filespace_id); +#endif + + if (ThereArePlanets && (Sys != NULL)) { + size_t sz, count = 0; + hid_t planets_filespace_id, group_id, dcpl_id; + hsize_t chunkdim = 1, dim = 0, maxdim = H5S_UNLIMITED; + + group_id = H5Gcreate(hdf5.file_id, "planets", + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + if (group_id < 0) return -1; + + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + H5Pset_chunk(dcpl_id, 1, &chunkdim); + avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE); + if (avail && (COMPRESSLEVEL > 0)) { + H5Pset_deflate(dcpl_id, COMPRESSLEVEL); + } + + /* planet data is essentially a big array of "scalars" (actually + * compound structs), so only need rank 1 */ + planets_filespace_id = H5Screate_simple(1, &dim, &maxdim); + hdf5.planets.memspace_id = H5Screate(H5S_SCALAR); + + sz = H5Tget_size(REALTYPE); + hdf5.planets.dtype_id = H5Tcreate(H5T_COMPOUND, FIELDS_PER_PLANET * sz); + + H5Tinsert(hdf5.planets.dtype_id, "t", sz * (count++), REALTYPE); + H5Tinsert(hdf5.planets.dtype_id, "mass", sz * (count++), REALTYPE); + + H5Tinsert(hdf5.planets.dtype_id, "x", sz * (count++), REALTYPE); + H5Tinsert(hdf5.planets.dtype_id, "y", sz * (count++), REALTYPE); + H5Tinsert(hdf5.planets.dtype_id, "z", sz * (count++), REALTYPE); + + H5Tinsert(hdf5.planets.dtype_id, "xvel", sz * (count++), REALTYPE); + H5Tinsert(hdf5.planets.dtype_id, "yvel", sz * (count++), REALTYPE); + H5Tinsert(hdf5.planets.dtype_id, "zvel", sz * (count++), REALTYPE); + + /* create a dataset per planet in the system */ + hdf5.planets.dataset_ids = malloc(Sys->nb * sizeof(hid_t)); + if (hdf5.planets.dataset_ids == NULL) return -1; + + for (int pl = 
0; pl < Sys->nb; ++pl) { + hdf5.planets.dataset_ids[pl] = H5Dcreate(group_id, Sys->name[pl], + hdf5.planets.dtype_id, planets_filespace_id, H5P_DEFAULT, + dcpl_id, H5P_DEFAULT); + } + + /* close temporary handles */ + H5Pclose(dcpl_id); + H5Gclose(group_id); + H5Sclose(planets_filespace_id); + } + + { + /* a little trick for determining which indices should be written + * by each processor, using the grid indices that have already been + * assigned and an exclusive sum */ + int rank = 0; + MPI_Comm tmp_comm; + + /* time dimension */ + hdf5.global_file_start[rank] = 0; + hdf5.global_file_ghost_start[rank] = 0; + rank += 1; + +#ifdef Z + MPI_Comm_split(hdf5.world_comm, Gridd.J, CPU_Rank, &tmp_comm); + MPI_Exscan(&(hdf5.write_dims[rank]), &(hdf5.global_file_start[rank]), + 1, MPI_UNSIGNED_LONG, MPI_SUM, tmp_comm); + MPI_Exscan(&(hdf5.ghost_dims[rank]), &(hdf5.global_file_ghost_start[rank]), + 1, MPI_UNSIGNED_LONG, MPI_SUM, tmp_comm); + MPI_Comm_free(&tmp_comm); + rank += 1; +#endif + +#ifdef Y + MPI_Comm_split(hdf5.world_comm, Gridd.K, CPU_Rank, &tmp_comm); + MPI_Exscan(&(hdf5.write_dims[rank]), &(hdf5.global_file_start[rank]), + 1, MPI_UNSIGNED_LONG, MPI_SUM, tmp_comm); + MPI_Exscan(&(hdf5.ghost_dims[rank]), &(hdf5.global_file_ghost_start[rank]), + 1, MPI_UNSIGNED_LONG, MPI_SUM, tmp_comm); + MPI_Comm_free(&tmp_comm); + rank += 1; +#endif + +#ifdef X + hdf5.global_file_start[rank] = 0; + hdf5.global_file_ghost_start[rank] = 0; + rank += 1; +#endif + } + + return 0; +} + +int WriteDomainHdf5() { + hid_t filespace_id, memspace_id; + hid_t group_id, dataset_id, dcpl_id; + hsize_t memsz, memst, memct, filesz; + + int rank = 1; + + MPI_Barrier(hdf5.world_comm); + + group_id = H5Gcreate(hdf5.file_id, + "domain", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + if (group_id < 0) return -1; + +#ifdef Z + /* memory start index, memory count, memory total size, and file total size + * for the z-coordinate data */ + memst = NGHZ * (Gridd.K > 0); + memct = Nz + NGHZ * (Gridd.K == 0) + (NGHZ + 1) * (Gridd.K + 1 == Gridd.NK); + memsz = Nz + 2 * NGHZ + 1; + filesz = NZ + 2 * NGHZ + 1; + + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + H5Pset_chunk(dcpl_id, 1, &filesz); + + filespace_id = H5Screate_simple(1, &filesz, NULL); + memspace_id = H5Screate_simple(1, &memsz, NULL); + + dataset_id = H5Dcreate(group_id, "z", REALTYPE, + filespace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + + if (Gridd.J == 0) { + /* only one rank should write this data, otherwise it gets re-written + * for every processor in y */ + H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET, + &(hdf5.global_file_ghost_start[rank]), NULL, &memct, NULL); + H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, + &memst, NULL, &memct, NULL); + } else { + /* to keep all other processors from writing, since H5Dwrite must be + * collective, just clear the hyperslab selection */ + H5Sselect_none(filespace_id); + H5Sselect_none(memspace_id); + } + + /* do the write operation */ + H5Dwrite(dataset_id, REALTYPE, memspace_id, filespace_id, hdf5.xfpl_id, Zmin); + + H5Pclose(dcpl_id); + H5Dclose(dataset_id); + H5Sclose(memspace_id); + H5Sclose(filespace_id); + + rank += 1; +#endif +#ifdef Y + /* memory start index, memory count, memory total size, and file total size + * for the y-coordinate data */ + memst = NGHY * (Gridd.J > 0); + memct = Ny + NGHY * (Gridd.J == 0) + (NGHY + 1) * (Gridd.J + 1 == Gridd.NJ); + memsz = Ny + 2 * NGHY + 1; + filesz = NY + 2 * NGHY + 1; + + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + H5Pset_chunk(dcpl_id, 1, &filesz); + + filespace_id = 
H5Screate_simple(1, &filesz, NULL); + memspace_id = H5Screate_simple(1, &memsz, NULL); + + dataset_id = H5Dcreate(group_id, "y", REALTYPE, + filespace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + + if (Gridd.K == 0) { + /* only one rank should write this data, otherwise it gets re-written + * for every processor in y */ + H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET, + &(hdf5.global_file_ghost_start[rank]), NULL, &memct, NULL); + H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, + &memst, NULL, &memct, NULL); + } else { + /* to keep all other processors from writing, since H5Dwrite must be + * collective, just clear the hyperslab selection */ + H5Sselect_none(filespace_id); + H5Sselect_none(memspace_id); + } + + /* do the write operation */ + H5Dwrite(dataset_id, REALTYPE, memspace_id, filespace_id, hdf5.xfpl_id, Ymin); + + H5Pclose(dcpl_id); + H5Dclose(dataset_id); + H5Sclose(memspace_id); + H5Sclose(filespace_id); + + rank += 1; +#endif +#ifdef X + /* memory start index and memory total size for the x-coordinate data */ + memst = 0; + memsz = Nx + 2 * NGHX + 1; + filesz = NX + 2 * NGHX + 1; + + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + H5Pset_chunk(dcpl_id, 1, &filesz); + + filespace_id = H5Screate_simple(1, &filesz, NULL); + memspace_id = H5Screate_simple(1, &memsz, NULL); + + dataset_id = H5Dcreate(group_id, "x", REALTYPE, + filespace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + + if ((Gridd.J == 0) && (Gridd.K == 0)) { + /* the x-coordinate isn't split across ranks; limit writing to just + * the (0,0) rank, which is always guaranteed to exist, even for + * sequential runs */ + H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET, + &memst, NULL, &memsz, NULL); + H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, + &memst, NULL, &memsz, NULL); + } else { + /* to keep all other processors from writing, since H5Dwrite must be + * collective, just clear the hyperslab selection */ + H5Sselect_none(filespace_id); + H5Sselect_none(memspace_id); + } + + H5Dwrite(dataset_id, REALTYPE, memspace_id, filespace_id, hdf5.xfpl_id, Xmin); + + H5Pclose(dcpl_id); + H5Dclose(dataset_id); + H5Sclose(memspace_id); + H5Sclose(filespace_id); + + rank += 1; +#endif + + /* close the group, we're done with it */ + H5Gclose(group_id); + /* make sure all ranks are done writing */ + MPI_Barrier(hdf5.world_comm); + + return 0; +} + +int WriteFieldTimeDep(hid_t dset_id, void *buffer) { + /* Write a time-dependent field (maximum rank 4, time + z + y + x) + * to the open file in a multiprocess-safe way. 
*/ + int err, rank = 0; + hid_t filespace_id; + hsize_t file_dims[4], file_start[4]; + + /* get the "old" dataspace global dimensions */ + filespace_id = H5Dget_space(dset_id); + H5Sget_simple_extent_dims(filespace_id, file_dims, NULL); + H5Sclose(filespace_id); + + /* increase the first dimension by 1 for this timestep */ + file_start[rank] = file_dims[0]; + rank += 1; + file_dims[0] += 1; + H5Dset_extent(dset_id, file_dims); + filespace_id = H5Dget_space(dset_id); + + /* other start indices are from precomputed arrays */ +#ifdef Z + file_start[rank] = hdf5.global_file_start[rank]; + rank += 1; +#endif +#ifdef Y + file_start[rank] = hdf5.global_file_start[rank]; + rank += 1; +#endif +#ifdef X + file_start[rank] = 0; + rank += 1; +#endif + + /* select this processor's slab and write the data */ + H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET, + file_start, NULL, hdf5.write_dims, NULL); + err = H5Dwrite(dset_id, REALTYPE, hdf5.fluids.memspace_id, + filespace_id, hdf5.xfpl_id, buffer); + H5Sclose(filespace_id); + + return err; +} + +int WriteFieldStatic(hid_t dset_id, void *buffer) { + /* Write a time-independent field (maximum rank 2, z + y) + * to the open file in a multiprocess-safe way. This is + * only used for the Stockholm boundary data. */ + int err = 0; +#ifdef STOCKHOLM + int rank = 1; + hid_t filespace_id; + hsize_t file_dims[4], file_start[4]; + + /* get the dataspace but don't modify */ + filespace_id = H5Dget_space(dset_id); + +#ifdef Z + file_start[rank] = hdf5.global_file_start[rank]; + rank += 1; +#endif +#ifdef Y + file_start[rank] = hdf5.global_file_start[rank]; + rank += 1; +#endif + + /* select this processor's slab and write the data */ + H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET, + file_start + 1, NULL, hdf5.write_dims + 1, NULL); + err = H5Dwrite(dset_id, REALTYPE, hdf5.stockholm.memspace_id, + filespace_id, hdf5.xfpl_id, buffer); + H5Sclose(filespace_id); +#endif + return err; +} + +int WriteOutputsHdf5() { + int err, rank = 0; + hsize_t mem_start[4]; + + mem_start[rank++] = 0; +#ifdef Z +#ifdef WRITEGHOSTS + mem_start[rank++] = NGHZ * (Gridd.K > 0); +#else + mem_start[rank++] = NGHZ; +#endif // WRITEGHOSTS +#endif // Z +#ifdef Y +#ifdef WRITEGHOSTS + mem_start[rank++] = NGHY * (Gridd.J > 0); +#else + mem_start[rank++] = NGHY; +#endif // WRITEGHOSTS +#endif // Y +#ifdef X +#ifdef WRITEGHOSTS + mem_start[rank++] = 0; +#else + mem_start[rank++] = NGHX; +#endif // WRITEGHOSTS +#endif // X + + MPI_Barrier(hdf5.world_comm); + H5Sselect_hyperslab(hdf5.fluids.memspace_id, H5S_SELECT_SET, + mem_start, NULL, hdf5.write_dims, NULL); + + for (int f = 0; f < NFLUIDS; ++f) { + int d = 0; /* local dataset counter */ + hid_t *dset_ids = hdf5.fluids.dataset_ids[f]; + + err = WriteFieldTimeDep(dset_ids[d++], Fluids[f]->Density->field_cpu); + if (err < 0) return err; +#ifdef Z + err = WriteFieldTimeDep(dset_ids[d++], Fluids[f]->Vz->field_cpu); + if (err < 0) return err; +#endif +#ifdef Y + err = WriteFieldTimeDep(dset_ids[d++], Fluids[f]->Vy->field_cpu); + if (err < 0) return err; +#endif +#ifdef X + err = WriteFieldTimeDep(dset_ids[d++], Fluids[f]->Vx->field_cpu); + if (err < 0) return err; +#endif + err = WriteFieldTimeDep(dset_ids[d++], Fluids[f]->Energy->field_cpu); + if (err < 0) return err; + } + + /* wait until all ranks finish, then flush to disk */ + MPI_Barrier(hdf5.world_comm); + H5Fflush(hdf5.file_id, H5F_SCOPE_GLOBAL); + + return 0; +} + +int WriteOutputs2dHdf5() { +#ifdef STOCKHOLM + int err, rank = 1; + hsize_t mem_start[4]; + + MPI_Barrier(hdf5.world_comm); + 
+#ifdef Z +#ifdef WRITEGHOSTS + mem_start[rank++] = NGHZ * (Gridd.K > 0); +#else + mem_start[rank++] = NGHZ; +#endif // WRITEGHOSTS +#endif // Z +#ifdef Y +#ifdef WRITEGHOSTS + mem_start[rank++] = NGHY * (Gridd.J > 0); +#else + mem_start[rank++] = NGHY; +#endif // WRITEGHOSTS +#endif // Y + + H5Sselect_hyperslab(hdf5.stockholm.memspace_id, H5S_SELECT_SET, + mem_start + 1, NULL, hdf5.write_dims + 1, NULL); + + for (int f = 0; f < NFLUIDS; ++f) { + int d = 0; /* local dataset counter */ + hid_t *dset_ids = hdf5.stockholm.dataset_ids[f]; + + err = WriteFieldStatic(dset_ids[d++], Fluids[f]->Density0->field_cpu); + if (err < 0) return err; +#ifdef Z + err = WriteFieldStatic(dset_ids[d++], Fluids[f]->Vz0->field_cpu); + if (err < 0) return err; +#endif +#ifdef Y + err = WriteFieldStatic(dset_ids[d++], Fluids[f]->Vy0->field_cpu); + if (err < 0) return err; +#endif +#ifdef X + err = WriteFieldStatic(dset_ids[d++], Fluids[f]->Vx0->field_cpu); + if (err < 0) return err; +#endif +#ifdef ADIABATIC + err = WriteFieldStatic(dset_ids[d++], Fluids[f]->Energy0->field_cpu); + if (err < 0) return err; +#endif + } + + /* wait until all ranks finish, then flush to disk */ + MPI_Barrier(hdf5.world_comm); + H5Fflush(hdf5.file_id, H5F_SCOPE_GLOBAL); +#endif + + return 0; +} + +int WritePlanetsHdf5() { + int err; + hid_t filespace_id; + hsize_t file_dims, file_start, file_count = 1; + real planet_buffer[FIELDS_PER_PLANET]; + + for (int pl = 0; pl < Sys->nb; ++pl) { + int i = 0; /* local field counter */ + hid_t dset_id = hdf5.planets.dataset_ids[pl]; + + /* get the "old" dataspace global dimensions */ + filespace_id = H5Dget_space(dset_id); + H5Sget_simple_extent_dims(filespace_id, &file_dims, NULL); + H5Sclose(filespace_id); + + /* increase the first (and only) dimension by 1 for this timestep */ + file_start = file_dims; + file_dims += 1; + H5Dset_extent(dset_id, &file_dims); + filespace_id = H5Dget_space(dset_id); + + planet_buffer[i++] = PhysicalTime; + planet_buffer[i++] = Sys->mass[pl]; + + planet_buffer[i++] = Sys->x[pl]; + planet_buffer[i++] = Sys->y[pl]; + planet_buffer[i++] = Sys->z[pl]; + + planet_buffer[i++] = Sys->vx[pl]; + planet_buffer[i++] = Sys->vy[pl]; + planet_buffer[i++] = Sys->vz[pl]; + + if (CPU_Master) { + /* only one rank should write the data */ + H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET, + &file_start, NULL, &file_count, NULL); + } else { + /* others call H5Dwrite with no elements selected */ + H5Sselect_none(filespace_id); + H5Sselect_none(hdf5.planets.memspace_id); + } + + /* write the data */ + err = H5Dwrite(dset_id, hdf5.planets.dtype_id, hdf5.planets.memspace_id, + filespace_id, hdf5.xfpl_id, planet_buffer); + if (err < 0) return err; + + H5Sclose(filespace_id); + } + + return 0; +} + +int WriteRealAttribute(const char *name, real val) { + herr_t err; + hid_t att_id, space_id; + + space_id = H5Screate(H5S_SCALAR); + if (space_id < 0) return -1; + + att_id = H5Acreate(hdf5.file_id, name, + REALTYPE, space_id, H5P_DEFAULT, H5P_DEFAULT); + if (att_id < 0) return -1; + + err = H5Awrite(att_id, REALTYPE, &val); + if (err < 0) return -1; + + H5Aclose(att_id); + H5Sclose(space_id); + + return 0; +} + +int WriteIntAttribute(const char *name, int val) { + herr_t err; + hid_t att_id, space_id; + + space_id = H5Screate(H5S_SCALAR); + if (space_id < 0) return -1; + + att_id = H5Acreate(hdf5.file_id, name, + H5T_STD_I32BE, space_id, H5P_DEFAULT, H5P_DEFAULT); + if (att_id < 0) return -1; + + err = H5Awrite(att_id, H5T_NATIVE_INT, &val); + if (err < 0) return -1; + + H5Aclose(att_id); + 
H5Sclose(space_id); + + return 0; +} + +int WriteBoolAttribute(const char *name, boolean val) { + herr_t err; + hid_t att_id, space_id; + + space_id = H5Screate(H5S_SCALAR); + if (space_id < 0) return -1; + + att_id = H5Acreate(hdf5.file_id, name, + H5T_STD_B8BE, space_id, H5P_DEFAULT, H5P_DEFAULT); + if (att_id < 0) return -1; + + err = H5Awrite(att_id, H5T_NATIVE_B8, &val); + if (err < 0) return -1; + + H5Aclose(att_id); + H5Sclose(space_id); + + return 0; +} + +int WriteStringAttribute(const char *name, const char *val) { + herr_t err; + hid_t att_id, space_id, type_id; + size_t len; + + space_id = H5Screate(H5S_SCALAR); + if (space_id < 0) return -1; + + type_id = H5Tcopy(H5T_C_S1); + if (type_id < 0) return -1; + + len = strlen(val); + err = H5Tset_size(type_id, len); + if (err < 0) return -1; + + att_id = H5Acreate(hdf5.file_id, name, + type_id, space_id, H5P_DEFAULT, H5P_DEFAULT); + if (att_id < 0) return -1; + + err = H5Awrite(att_id, type_id, val); + if (err < 0) return -1; + + H5Aclose(att_id); + H5Tclose(type_id); + H5Sclose(space_id); + + return 0; +} + +int WriteParametersHdf5() { + for (int i = 0; i < Id_Var; ++i) { + char *var = Var_Set[i].variable; + const char *name = Var_Set[i].name; + + switch (Var_Set[i].type) { + case REAL: + WriteRealAttribute(name, *((real *) var)); + break; + + case INT: + WriteIntAttribute(name, *((int *) var)); + break; + + case BOOL: + WriteBoolAttribute(name, *((boolean *) var)); + break; + + case STRING: + WriteStringAttribute(name, var); + break; + } + } + + return 0; +} + +void TeardownOutputHdf5() { + H5Pclose(hdf5.xfpl_id); + H5Sclose(hdf5.fluids.memspace_id); +#ifdef STOCKHOLM + H5Sclose(hdf5.stockholm.memspace_id); +#endif + + for (int f = 0; f < NFLUIDS; ++f) { + for (int i = 0; i < fields_per_fluid; ++i) { + H5Dclose(hdf5.fluids.dataset_ids[f][i]); + } + + free(hdf5.fluids.dataset_ids[f]); + +#ifdef STOCKHOLM + for (int i = 0; i < static_fields_per_fluid; ++i) { + H5Dclose(hdf5.stockholm.dataset_ids[f][i]); + } + + free(hdf5.stockholm.dataset_ids[f]); +#endif + } + + if (ThereArePlanets && (Sys != NULL)) { + H5Tclose(hdf5.planets.dtype_id); + H5Sclose(hdf5.planets.memspace_id); + + for (int pl = 0; pl < Sys->nb; ++pl) { + H5Dclose(hdf5.planets.dataset_ids[pl]); + } + + free(hdf5.planets.dataset_ids); + } + + H5Fclose(hdf5.file_id); + MPI_Comm_free(&(hdf5.world_comm)); +} diff --git a/src/prototypes.h b/src/prototypes.h index cfcf9e1..378ca2b 100644 --- a/src/prototypes.h +++ b/src/prototypes.h @@ -1,5 +1,5 @@ #ifdef __GPU -#define ex extern "C" +#define ex extern "C" #else #define ex extern #endif @@ -253,9 +253,9 @@ ex void SubStep3_cpu(real); //transport Prototypes ex void VanLeerX(Field*, Field*, Field*, real); -ex void TransportX(Field*, Field*, Field*, real); -ex void TransportY(Field*, Field*, real); -ex void TransportZ(Field*, Field*, real); +ex void TransportX(Field*, Field*, Field*, real); +ex void TransportY(Field*, Field*, real); +ex void TransportZ(Field*, Field*, real); ex void X_advection (Field*, real); ex void transport(real); @@ -314,6 +314,14 @@ ex void write_vtk_header(FILE*, Field*, int); ex void write_vtk_coordinates(FILE*, Field*); ex void write_vtk_scalar(FILE*, Field*); +ex int SetupOutputHdf5(); +ex int WriteDomainHdf5(); +ex int WriteOutputsHdf5(); +ex int WriteOutputs2dHdf5(); +ex int WritePlanetsHdf5(); +ex int WriteParametersHdf5(); +ex void TeardownOutputHdf5(); + //update.c ex void UpdateX_cpu(real, Field*, Field*, Field*); ex void UpdateY_cpu(real, Field*, Field*); @@ -376,7 +384,7 @@ ex void 
_LorentzForce_cpu(real, int, int, int, int, int, int, int, int, int, int ex void UpdateMagneticField (real, int, int, int); ex void _UpdateMagneticField_cpu(real,int,int,int,int,int,int,int,int,int, Field*,Field*,Field*); - + ex void ComputeMHD (real); ex void ComputeDivergence (Field *, Field *, Field *); diff --git a/src/psys.c b/src/psys.c index 5f42a7c..a002c32 100644 --- a/src/psys.c +++ b/src/psys.c @@ -18,7 +18,7 @@ int FindNumberOfPlanets(char *filename) { } PlanetarySystem *AllocPlanetSystem(int nb) { - char command[512]; + char command[512], **name; real *mass, *x, *y, *z, *vx, *vy, *vz, *acc; boolean *feeldisk, *feelothers; int i; @@ -29,22 +29,23 @@ PlanetarySystem *AllocPlanetSystem(int nb) { fprintf (stderr, "Not enough memory to alloc PlanetarySystem.\n"); prs_exit (1); } - x = (real*)malloc(sizeof(real)*(nb+1)); - y = (real*)malloc(sizeof(real)*(nb+1)); - z = (real*)malloc(sizeof(real)*(nb+1)); - vx = (real*)malloc(sizeof(real)*(nb+1)); - vy = (real*)malloc(sizeof(real)*(nb+1)); - vz = (real*)malloc(sizeof(real)*(nb+1)); - mass = (real*)malloc(sizeof(real)*(nb+1)); - acc = (real*)malloc(sizeof(real)*(nb+1)); + x = malloc(sizeof(real) * nb); + y = malloc(sizeof(real) * nb); + z = malloc(sizeof(real) * nb); + vx = malloc(sizeof(real) * nb); + vy = malloc(sizeof(real) * nb); + vz = malloc(sizeof(real) * nb); + mass = malloc(sizeof(real) * nb); + acc = malloc(sizeof(real) * nb); + name = malloc(sizeof(char *) * nb); if ((x == NULL) || (y == NULL) || (z == NULL) \ || (vx == NULL) || (vy == NULL) || (vz == NULL) \ - || (acc == NULL) || (mass == NULL)) { + || (acc == NULL) || (mass == NULL) || (name == NULL)) { fprintf (stderr, "Not enough memory to alloc components of planetary system.\n"); prs_exit (1); } - feeldisk = (boolean*)malloc(sizeof(char)*(nb+1)); - feelothers = (boolean*)malloc(sizeof(char)*(nb+1)); + feeldisk = malloc(sizeof(char) * nb); + feelothers = malloc(sizeof(char) * nb); if ((feeldisk == NULL) || (feelothers == NULL)) { fprintf (stderr, "Not enough memory for boolean allocation in PlanetarySystem.\n"); prs_exit (1); @@ -57,17 +58,22 @@ PlanetarySystem *AllocPlanetSystem(int nb) { sys->vz= vz; sys->acc=acc; sys->mass = mass; + sys->name = name; sys->FeelDisk = feeldisk; sys->FeelOthers = feelothers; for (i = 0; i < nb; i++) { x[i] = y[i] = z[i] = vx[i] = vy[i] = vz[i] = mass[i] = acc[i] = 0.0; + name[i] = NULL; feeldisk[i] = feelothers[i] = YES; } - for (i = 0; i < nb; i++) { - /* Creates orbit[i].dat if it does not exist */ - sprintf (command, "touch %s/orbit%d.dat", OUTPUTDIR, i); - temp = system (command); - } + +#ifndef HDF5 + for (i = 0; i < nb; i++) { + /* Creates orbit[i].dat if it does not exist */ + sprintf (command, "touch %s/orbit%d.dat", OUTPUTDIR, i); + temp = system (command); + } +#endif sys->x = x; sys->y = y; @@ -103,15 +109,21 @@ PlanetarySystem *AllocPlanetSystem(int nb) { } void FreePlanetary () { - free (Sys->x); - free (Sys->vx); - free (Sys->y); - free (Sys->vy); - free (Sys->mass); - free (Sys->acc); - free (Sys->FeelOthers); - free (Sys->FeelDisk); - free (Sys); + free(Sys->x); + free(Sys->vx); + free(Sys->y); + free(Sys->vy); + free(Sys->mass); + free(Sys->acc); + + for (int i = 0; i < Sys->nb; ++i) { + free(Sys->name[i]); + } + + free(Sys->name); + free(Sys->FeelOthers); + free(Sys->FeelDisk); + free(Sys); } real ComputeInnerMass(real r) { @@ -126,7 +138,7 @@ real ComputeInnerMass(real r) { if(Ymed(j)nb = 0; sys->x = sys->vx = sys->y = sys->vy = NULL; return sys; } - + nb = FindNumberOfPlanets (filename); if (CPU_Master) { if(nb > 1) 
printf ("%d planets found.\n", nb); @@ -169,6 +181,10 @@ PlanetarySystem *InitPlanetarySystem (char *filename) { while (fgets(s, 510, input) != NULL) { sscanf(s, "%s ", nm); if (isalpha(s[0])) { + size_t namelen = strlen(nm); + sys->name[i] = malloc((namelen + 1) * sizeof(char)); + strcpy(sys->name[i], nm); + s1 = s + strlen(nm); #ifdef FLOAT sscanf(s1 + strspn(s1, "\t :=>_"), "%f %f %f %s %s", &dist, &mass, &accret, test1, test2); @@ -392,7 +408,7 @@ real GetPsysInfo (boolean action) { } switch (action) { - case MARK: + case MARK: X_planet = xc; Y_planet = yc; return 0.; diff --git a/std/stdpar.par b/std/stdpar.par index 5637ae4..4cd53a2 100644 --- a/std/stdpar.par +++ b/std/stdpar.par @@ -89,4 +89,6 @@ WriteVy No WriteVz No WriteDivergence No WriteEnergyRad No -WriteTau No \ No newline at end of file +WriteTau No +FileTag fargo3d +CompressLevel 3