# Invocation command line:
# /data/benchmarks/SPEChpc2002/bin/runspec -c chem_m_64.cfg --action validate --strict --reportable -e mpi chem_m
############################################################################
# Invocation command line:
# runspec -c chem.cfg --action validate --strict --reportable -e mpi chem_s
############################################################################
#
# SPEC HPC2002 configuration for the GAMESS ("chem") benchmarks, run as
# 64 MPI ranks on an IBM eServer 1350 (Opteron 246) cluster over
# Myrinet/GM.  Line breaks reconstructed from a collapsed paste; no key
# or value has been altered.

# --- Run control ---
action = validate
teeout = yes
output_format = asc

# --- Report header: licensee and system-description fields ---
license_num = HPG0005
company_name = IBM Corporation
machine_name = IBM eServer 1350 Cluster
hw_vendor = IBM Corporation
hw_model = eServer 325 Cluster
hw_cpu = AMD Opteron (246)
hw_cpu_mhz = 2000
hw_fpu = Integrated
hw_ncpuorder = 1,2 per node, unlimited nodes
hw_pcache = 64KBI + 64KBD on chip
hw_scache = 1024KB(I+D) on chip
hw_tcache = None
hw_ocache = None
hw_parallel = MPI
hw_memory = 4GB DDR333 CL2.5 Registered ECC
hw_disk = 1 x 36GB SCSI per node
hw_other000 = Myrinet M3F-PCIXD-2 Adapters (one per node)
hw_other001 = Myrinet M3-E32 Switch Enclosure
hw_other002 = Myrinet M3-SW16-8F 8-Port Line Card (4)
hw_other003 = x345 File Server
hw_other004 = Cisco 4003 GbE Switch
hw_other005 = ServeRAID 4H SCSI RAID Adapter
hw_other006 = SCSI Drives (10)
hw_avail = Oct-2003
sw_os000 = SuSE Linux 8.0 SLES 64 bit Kernel
sw_os001 = k_smp-2.4.19-249 (from Service Pack 2)
sw_file = Linux/reiserfs
sw_state = Multi-user
sw_avail = Jul-2003
tester_name = IBM Corporation
test_site = Research Triangle Park, NC
test_date = Oct-2003
sw_compiler000 = Fortran: Portland Group 5.1-1 Fortran 90
sw_compiler001 = C: Portland Group 5.1-1 C
sw_other000 = MPICH 1.2.5.10
sw_other001 = GM 2.0.6_Linux
# sw_other002 = AMD Core Math Library (ACML)
hw_ncpu = 64
sw_parallel = MPI
sw_procs_thrds = 64
sw_ranks = 64
sw_threads = N/A
prepared_by = Douglas Pase

# --- Toolchain locations (PGI 5.1 compilers; GCC libs used at link time) ---
GCCHOME = /opt/gcc33
GCCBIN = $(GCCHOME)/bin
GCCLIB = /usr/lib64/gcc-lib/x86_64-suse-linux/3.2.2
PGIHOME = /usr/pgi/linux86-64/5.1
PGIBIN = $(PGIHOME)/bin
PGILIB = $(PGIHOME)/lib
CC = $(PGIBIN)/pgcc
CXX = $(PGIBIN)/pgCC
FC = $(PGIBIN)/pgf90
F77 = $(PGIBIN)/pgf90
CPP = $(PGIBIN)/pgf90
env_vars = yes

# --- Benchmark tree and MPICH-over-GM (Myrinet) installation paths ---
HPCHOME = /data/benchmarks/SPEChpc2002
HPCCONFIG = $HPCHOME/config
ENVHOME = $HPCCONFIG/env
MFHOME = $HPCCONFIG/mf
BENCHHOME = $HPCHOME/benchspec/HPC2002
MPIHOME = /data/mpichgm-1.2.5.10-64
MPIINC = $(MPIHOME)/include
MPILIB = $(MPIHOME)/lib
MPIBIN = $MPIHOME/bin
MPIEXE = $MPIBIN/mpirun.ch_gm
L64LIB = /usr/lib64
# NOTE(review): GMHOME points at a "-32" GM build while the MPI stack above
# is a "-64" build -- presumably intentional for this install, but verify.
GMHOME = /data/gm-2.0.6_Linux-32
GMLIB = $(GMHOME)/lib
GMBLIB = $(GMHOME)/binary/lib
# Link-time search paths plus the MPICH, GM, and pthread libraries.
MPILIBS = -L$(GMBLIB) -L$(GMLIB) -L$(MPILIB) \
          -L$(PGILIB) -L$(L64LIB) -L$(GCCLIB) \
          -lmpich -lgm -lpthread
# ACML math-library hooks, disabled for this run:
# MATHDIR = /opt/acml/pgi64
# MATHLIB = -L$(MATHDIR) -lacml
# -DSPEC_HPG_MATHLIB
# srcalt = mathlib

# --- mpirun.ch_gm argument pieces, assembled into MPIRUN per benchmark ---
GMNP = -np $hw_ncpu
GMMF = -machinefile $MFHOME/n00x2.mf
GMRECV = --gm-recv blocking
GMSHMEM = --gm-no-shmem
#
# chem:
# SPEC_HPG_MPI_INT4, ENV_MPI_ENABLED={ENABLED,}
#
###########################################################################
#
# MPI
#
###########################################################################

# --- 370.gamess_s (small data set), MPI ---
370.gamess_s=default=mpi=default:
CPORTABILITY = -DSPEC_HPG_MPI_INT4 -DBITS64
FPORTABILITY = -DSPEC_HPG_MPI_INT4 -DBITS64 -i8
COPTIMIZE = -O3 -Munroll -Mnoframe
FOPTIMIZE = -O3 -Munroll -Mnoframe
CPPFLAGS = -E
EXTRA_CFLAGS = -I. -I$(MPIINC)
EXTRA_FFLAGS = -I. -I$(MPIINC)
# NOTE(review): "-tp k8-32" selects a 32-bit target here even though the
# portability flags define -DBITS64, and the 371 section below links
# without it -- confirm this asymmetry is intended.
EXTRA_LDFLAGS = -Bstatic -tp k8-32
EXTRA_LIBS = $(MPILIBS)
use_submit_for_speed = yes
BENCHNAME = 370.gamess_s
ENV_MPI_ENABLED = ENABLED
# Stage the per-benchmark environment file into ~/.ssh/environment so
# remote ranks inherit it, run under mpirun, then remove it afterwards.
CHEMCP = cp -rf $ENVHOME/$BENCHNAME.env ~/.ssh/environment
CHEMRM = rm -rf ~/.ssh/environment
MPIRUN = $MPIEXE $GMNP $GMMF $GMRECV $GMSHMEM -wd `pwd`
submit = $CHEMCP ; $MPIRUN $command ; $CHEMRM
# Report notes: flags as used above (notes050-060 left disabled).
notes005 = Flags:
notes010 = Fortran:
notes015 = -DSPEC_HPG_MPI_INT4 -DBITS64 -i8
notes020 = -O3 -Munroll -Mnoframe
notes025 = C:
notes030 = -DSPEC_HPG_MPI_INT4 -DBITS64
notes035 = -O3 -Munroll -Mnoframe
notes040 = Preprocessor:
notes045 = -E
# notes050 =
# notes055 = Alternate Source:
# notes060 = none.
# Report notes for 370.gamess_s: submit command, BIOS settings, and
# cluster layout (these appear verbatim in the published report).
notes065 =
notes070 = Submit command to run applications:
notes075 = cp 370.gamess_s.env ~/.ssh/environment ;
notes080 = mpirun.ch_gm -np 64 -machinefile n00x2.mf --gm-recv
notes085 = blocking --gm-no-shmem -wd `pwd` $command ;
notes090 = rm ~/.ssh/environment
notes095 =
notes100 = Set the following BIOS parameters:
notes105 = DRAM Interleave = AUTO
notes110 = Node Interleave = Disabled
notes115 = ACPI SRAT = Enabled
notes120 =
notes125 = Cluster Configuration:
notes130 = Two CPUs per node
notes135 = 370.gamess_s.env contains environment variables
notes140 = the benchmark needs for execution in a cluster
notes145 = All benchmark files are on a shared file server
notes150 = Nodes and file server use NFS shared file system

# --- 371.gamess_m (medium data set), MPI ---
371.gamess_m=default=mpi=default:
CPORTABILITY = -DSPEC_HPG_MPI_INT4 -DBITS64
FPORTABILITY = -DSPEC_HPG_MPI_INT4 -DBITS64 -i8
# Optimization flags are commented out for this benchmark (the report
# notes below mirror that).
# COPTIMIZE = -O3
# FOPTIMIZE = -O3
CPPFLAGS = -E
EXTRA_CFLAGS = -I. -I$(MPIINC)
EXTRA_FFLAGS = -I. -I$(MPIINC)
EXTRA_LDFLAGS = -Bstatic
EXTRA_LIBS = $(MPILIBS)
use_submit_for_speed = yes
BENCHNAME = 371.gamess_m
ENV_MPI_ENABLED = ENABLED
# Same submit scheme as 370.gamess_s: stage the environment file for
# remote ranks, run under mpirun, then clean up.
CHEMCP = cp -rf $ENVHOME/$BENCHNAME.env ~/.ssh/environment
CHEMRM = rm -rf ~/.ssh/environment
MPIRUN = $MPIEXE $GMNP $GMMF $GMRECV $GMSHMEM -wd `pwd`
submit = $CHEMCP ; $MPIRUN $command ; $CHEMRM
notes005 = Flags:
notes010 = Fortran:
notes015 = -DSPEC_HPG_MPI_INT4 -DBITS64 -i8
# notes020 = -O3
notes025 = C:
notes030 = -DSPEC_HPG_MPI_INT4 -DBITS64
# notes035 = -O3
notes040 = Preprocessor:
notes045 = -E
# notes050 =
# notes055 = Alternate Source:
# notes060 = none.
# Report notes for 371.gamess_m: submit command, BIOS settings, and
# cluster layout (these appear verbatim in the published report).
notes065 =
notes070 = Submit command to run applications:
notes075 = cp 371.gamess_m.env ~/.ssh/environment ;
notes080 = mpirun.ch_gm -np 64 -machinefile n00x2.mf --gm-recv
notes085 = blocking --gm-no-shmem -wd `pwd` $command ;
notes090 = rm ~/.ssh/environment
notes095 =
notes100 = Set the following BIOS parameters:
notes105 = DRAM Interleave = AUTO
notes110 = Node Interleave = Disabled
notes115 = ACPI SRAT = Enabled
notes120 =
notes125 = Cluster Configuration:
notes130 = Two CPUs per node
notes135 = 371.gamess_m.env contains environment variables
notes140 = the benchmark needs for execution in a cluster
notes145 = All benchmark files are on a shared file server
notes150 = Nodes and file server use NFS shared file system