# Invocation command line:
# /nas/store/jbaron/mpi2007-1.1/bin/runspec --ranks 128 --reportable -a validate -n 2 -o asc -i mref -T base -c sgi-linux-x86_64-intel-sgimpi.cfg medium
# output_root was not used for this run
############################################################################
####################################################################
#
# defaults
#
####################################################################
#flagsurl = http://www.spec.org/mpi2007/flags/EM64T_Intel101_flags.20080618.xml
flagsurl000 = http://www.spec.org/mpi2007/flags/EM64T_Intel101_flags.20080611.xml
ext=sgimpi
action=validate
tune=base
input=ref
teeout=no
env_vars=1
no_input_handler=null
mean_anyway=1
FC = ifort
CC = icc
CXX = icpc
####################################################################
#
# Base flags
#
####################################################################
default=base=default=default:
FOPTIMIZE = -O3 -ipo -xT -no-prec-div
COPTIMIZE = -O3 -ipo -xT -no-prec-div
CXXOPTIMIZE = -O3 -ipo -xT -no-prec-div -ansi-alias
EXTRA_LIBS = -lmpi
default=default=default=default:
use_submit_for_speed=1
submit=mpiexec -n $ranks $command
####################################################################
#
# Portability flags
#
####################################################################
121.pop2=default=default=default:
CPORTABILITY = -DSPEC_MPI_CASE_FLAG
127.wrf2=default=default=default:
CPORTABILITY = -DSPEC_MPI_CASE_FLAG -DSPEC_MPI_LINUX
# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
hw_avail = Mar-2009
license_num = 4
prepared_by = John Baron
sw_avail = Jan-2009
sw_base_ptrsize = 64-bit
sw_other = None
sw_peak_ptrsize = 64-bit
system_vendor = SGI
test_sponsor = SGI
tester = SGI
system_class = Homogeneous
sw_preprocessors = None
sw_mpi_other = OFED 1.3.1
sw_mpi_library = SGI MPT 1.23
interconnect_IO_hw_model = MT26418 ConnectX
interconnect_IO_hw_switch_1_count = 8
interconnect_IO_hw_switch_1_data_rate = InfiniBand 4x DDR
interconnect_IO_hw_switch_1_firmware = 2020001
interconnect_IO_hw_switch_1_model = Mellanox MT47396 InfiniScale-III
interconnect_IO_hw_switch_1_ports = 24
interconnect_IO_hw_topo = Bristle hypercube with express links
interconnect_IO_hw_vendor = Mellanox Technologies
interconnect_IO_label = InfiniBand (I/O)
interconnect_IO_order = 2
interconnect_IO_purpose = I/O traffic
interconnect_MPI_hw_model = MT26418 ConnectX
interconnect_MPI_hw_switch_1_count = 8
interconnect_MPI_hw_switch_1_data_rate = InfiniBand 4x DDR
interconnect_MPI_hw_switch_1_firmware = 2020001
interconnect_MPI_hw_switch_1_model = Mellanox MT47396 InfiniScale III
interconnect_MPI_hw_switch_1_ports = 24
interconnect_MPI_hw_topo = Bristle hypercube with express links
interconnect_MPI_hw_vendor = Mellanox Technologies
interconnect_MPI_label = InfiniBand (MPI)
interconnect_MPI_order = 1
interconnect_MPI_purpose = MPI traffic
node_compute_count = 16
node_compute_hw_adapter_IB_count = 1
node_compute_hw_adapter_IB_data_rate = InfiniBand 4x DDR
node_compute_hw_adapter_IB_driver = OFED-1.3.1
node_compute_hw_adapter_IB_firmware = 2.5.0
node_compute_hw_adapter_IB_interconnect = InfiniBand
node_compute_hw_adapter_IB_model000 = Mellanox MT26418 ConnectX IB DDR
node_compute_hw_adapter_IB_model001 = (PCIe x8 Gen2 5 GT/s)
node_compute_hw_adapter_IB_ports_used = 2
node_compute_hw_adapter_IB_slot_type = PCIe x8 Gen2
node_compute_hw_cpu_char000 = Intel Turbo Boost Technology up to 3.33 GHz,
node_compute_hw_cpu_char001 = 6.4 GT/s QPI, Hyper-Threading enabled
node_compute_hw_cpu_mhz = 2934
node_compute_hw_cpu_name = Intel Xeon X5570
node_compute_hw_disk = None
node_compute_hw_memory = 48 GB (12*4GB DDR3-1066 CL7 RDIMMs)
node_compute_hw_model = SGI Altix ICE 8200EX (Intel Xeon X5570, 2.93 GHz)
node_compute_hw_nchips = 2
node_compute_hw_ncores = 8
node_compute_hw_ncoresperchip = 4
node_compute_hw_ncpuorder = 1-2 chips
node_compute_hw_nthreadspercore = 2
node_compute_hw_ocache = None
node_compute_hw_other = None
node_compute_hw_pcache = 32 KB I + 32 KB D on chip per core
node_compute_hw_scache = 256 KB I+D on chip per core
node_compute_hw_tcache = 8 MB I+D on chip per chip
node_compute_hw_vendor = SGI
node_compute_label = SGI Altix ICE 8200EX Compute Node
node_compute_order = 2
node_compute_purpose = compute
node_compute_sw_localfile = NFSv3
node_compute_sw_os000 = SUSE Linux Enterprise Server 10 (x86_64) SP2
node_compute_sw_os001 = Kernel 2.6.16.60-0.30-smp
node_compute_sw_other = SGI ProPack 6 for Linux Service Pack 2
node_compute_sw_sharedfile = NFSv3 IPoIB
node_compute_sw_state = Multi-user, run level 3
node_fileserver_count = 1
node_fileserver_hw_adapter_IB_count = 2
node_fileserver_hw_adapter_IB_data_rate = InfiniBand 4x DDR
node_fileserver_hw_adapter_IB_driver = OFED-1.3
node_fileserver_hw_adapter_IB_firmware = 5.3.0
node_fileserver_hw_adapter_IB_interconnect = InfiniBand
node_fileserver_hw_adapter_IB_model000 = Mellanox MT25208 InfiniHost III Ex
node_fileserver_hw_adapter_IB_model001 = (PCIe x8 Gen1 2.5 GT/s)
node_fileserver_hw_adapter_IB_ports_used = 2
node_fileserver_hw_adapter_IB_slot_type = PCIe x8 Gen1
node_fileserver_hw_cpu_char = 1333 MHz FSB
node_fileserver_hw_cpu_mhz = 2328
node_fileserver_hw_cpu_name = Intel Xeon 5140
node_fileserver_hw_disk000 = 7 TB RAID 5
node_fileserver_hw_disk001 = 48 x 147 GB SAS (Seagate Cheetah 15000 rpm)
node_fileserver_hw_memory = 24 GB (6*4GB DDR2-400 DIMMS)
node_fileserver_hw_model = SGI Altix XE 240 (Intel Xeon 5140, 2.33 GHz)
node_fileserver_hw_nchips = 2
node_fileserver_hw_ncores = 4
node_fileserver_hw_ncoresperchip = 2
node_fileserver_hw_ncpuorder = 1-2 chips
node_fileserver_hw_nthreadspercore = 1
node_fileserver_hw_ocache = None
node_fileserver_hw_other = None
node_fileserver_hw_pcache = 32 KB I + 32 KB D on chip per core
node_fileserver_hw_scache = 4 MB I+D on chip per chip
node_fileserver_hw_tcache = None
node_fileserver_hw_vendor = SGI
node_fileserver_label = SGI InfiniteStorage Nexis 2000 NAS
node_fileserver_order = 3
node_fileserver_purpose = fileserver
node_fileserver_sw_localfile = xfs
node_fileserver_sw_os000 = SUSE Linux Enterprise Server 10 (x86_64) SP1
node_fileserver_sw_os001 = Kernel 2.6.16.54-0.2.5-smp
node_fileserver_sw_other = SGI ProPack 5 for Linux Service Pack 5
node_fileserver_sw_state = Multi-user, run level 3
sw_c_compiler000 = Intel C Compiler for Linux
sw_c_compiler001 = Version 10.1, Build 20080801
sw_cxx_compiler000 = Intel C++ Compiler for Linux
sw_cxx_compiler001 = Version 10.1, Build 20080801
sw_f_compiler000 = Intel Fortran Compiler for Linux
sw_f_compiler001 = Version 10.1, Build 20080801
system_name000 = SGI Altix ICE 8200EX
system_name001 = (Intel Xeon X5570, 2.93 GHz)
notes_000 = Software environment:
notes_005 = setenv MPI_REQUEST_MAX 65536
notes_010 = Determines the maximum number of nonblocking sends and
notes_015 = receives that can simultaneously exist for any single MPI
notes_020 = process. MPI generates an error message if this limit
notes_025 = (or the default, if not set) is exceeded. Default: 16384
notes_030 = setenv MPI_TYPE_MAX 32768
notes_035 = Determines the maximum number of data types that can
notes_040 = simultaneously exist for any single MPI process.
notes_045 = MPI generates an error message if this limit (or the default,
notes_050 = if not set) is exceeded. Default: 1024
notes_055 = setenv MPI_BUFS_THRESHOLD 1
notes_060 = Determines whether MPT uses per-host or per-process message
notes_065 = buffers for communicating with other hosts. Per-host buffers
notes_070 = are generally faster but for jobs running across many hosts they
notes_075 = can consume a prodigious amount of memory. MPT will use per-
notes_080 = host buffers for jobs using up to and including this many hosts
notes_085 = and will use per-process buffers for larger host counts.
notes_090 = Default: 64
notes_095 = setenv MPI_DSM_DISTRIBUTE
notes_100 = Activates NUMA job placement mode. This mode ensures that each
notes_105 = MPI process gets a unique CPU and physical memory on the node
notes_110 = with which that CPU is associated. Currently, the CPUs are
notes_115 = chosen by simply starting at relative CPU 0 and incrementing
notes_120 = until all MPI processes have been forked.
notes_125 =
notes_130 = limit stacksize unlimited
notes_135 = Removes limits on the maximum size of the automatically-
notes_140 = extended stack region of the current process and each
notes_145 = process it creates.
notes_150 = PBS Pro batch scheduler (www.altair.com) is used with
notes_155 = placement sets to ensure each MPI job is assigned to
notes_160 = a topologically compact set of nodes
notes_165 = BIOS settings:
notes_170 = AMI BIOS version 8.15
notes_175 = Hyper-Threading Technology enabled (default)
notes_180 = Intel Turbo Boost Technology enabled (default)
notes_185 = Intel Turbo Boost Technology activated in the OS via
notes_190 = /etc/init.d/acpid start
notes_195 = /etc/init.d/powersaved start
notes_200 = powersave -f