# Invocation command line:
# /home/gzagorod/work/mpi2007/bin/runspec --reportable --config intel_mpi2007.cfg --define EXEC_PERHOST=8 --flagsurl EM64T_Intel101_flags.xml --size mref --iterations 3 --output_format=all --ranks=64 all
# output_root was not used for this run
############################################################################
#####################################################################
#
# Config file to run SPEC MPI2007 with Intel Software Toolchain
# (Intel Compiler 10.1 and Intel MPI 3.1)
#
#####################################################################
env_vars = 1
basepeak = 1
reportable = 1
ignore_errors = 0
iterations = 3
makeflags = -j 4
tune = base
size = mref
use_version_url = 1
version_url = file:current_version
use_submit_for_speed = 1
output_format = all
FC = mpiifort
CC = mpiicc
CXX = mpiicpc
#####################################################################
# Portability flags
#####################################################################
121.pop2=default=default=default:
CPORTABILITY = -DSPEC_MPI_CASE_FLAG
126.lammps=default=default=default:
CXXPORTABILITY = -DMPICH_IGNORE_CXX_SEEK
127.wrf2=default=default=default:
CPORTABILITY = -DSPEC_MPI_CASE_FLAG -DSPEC_MPI_LINUX
130.socorro=default=default=default:
CPORTABILITY = -DSPEC_EIGHT_BYTE_LONG
#################################################################
# Optimization flags
#################################################################
default=default=default=default:
OPTIMIZE = -O3 -no-prec-div -fno-alias -xT
submit = mpiexec -genv I_MPI_DEVICE rdssm -genv I_MPI_FALLBACK_DEVICE disable -perhost 8 -n $ranks $command
#################################################################
# Notes
#################################################################
test_sponsor = Intel Corporation
license_num = 13
tester = Grigory Zagorodnev
test_date = Jan-2008
hw_avail = Dec-2007
sw_avail = Dec-2007
prepared_by = Intel Corporation
system_vendor = Intel Corporation
system_name = Endeavor
interconnect_ib_hw_topo = Star
interconnect_fs_hw_topo = Star
#
# Computation node info
#
node_compute_label = Endeavor Node
node_compute_order = 1
node_compute_count = 8
node_compute_purpose = compute
node_compute_hw_vendor = Intel
node_compute_hw_model = SR1560SF
node_compute_hw_cpu_name = Intel Xeon CPU E5462
node_compute_hw_ncpuorder = 1-2 chips
node_compute_hw_nchips = 2
node_compute_hw_ncores = 8
node_compute_hw_ncoresperchip = 4
node_compute_hw_nthreadspercore = 1
node_compute_hw_cpu_char = 1600MHz FSB
node_compute_hw_cpu_mhz = 2800
node_compute_hw_pcache = 32 KB I + 32 KB D on chip per core
node_compute_hw_scache = 12 MB I+D on chip per chip, 6 MB shared / 2 cores
node_compute_hw_tcache = None
node_compute_hw_ocache = None
node_compute_hw_memory = 16 GB (FBDIMM 16x1-GB 667 MHz)
node_compute_hw_disk = Seagate Barracuda ES 250 GB ST3250620NS
node_compute_hw_other = None
node_compute_hw_adapter_ib_model = Mellanox MHGH28-XTC IB DDR (PCIe x8 Gen1 2.5 GT/s)
node_compute_hw_adapter_ib_count = 1
node_compute_hw_adapter_ib_slot_type = PCIe x16 Gen2
node_compute_hw_adapter_ib_data_rate = InfiniBand 4x DDR
node_compute_hw_adapter_ib_ports_used = 1
node_compute_hw_adapter_ib_interconnect = InfiniBand
node_compute_hw_adapter_ib_driver = OFED 1.2.5
node_compute_hw_adapter_ib_firmware = 2.2
node_compute_hw_adapter_fs_model000 = Intel (ESB2) 82563EB Dual-Port Gigabit Ethernet
node_compute_hw_adapter_fs_model001 = Controller
node_compute_hw_adapter_fs_count = 1
node_compute_hw_adapter_fs_slot_type = PCIe x8
node_compute_hw_adapter_fs_data_rate = 1Gbps Ethernet
node_compute_hw_adapter_fs_ports_used = 1
node_compute_hw_adapter_fs_interconnect = Ethernet
node_compute_hw_adapter_fs_driver = e1000
node_compute_hw_adapter_fs_firmware = N/A
node_compute_sw_os = RedHat EL 4 Update 4
node_compute_sw_localfile = Linux/ext3
node_compute_sw_sharedfile = DirectFlow
node_compute_sw_state = Multi-User
node_compute_sw_other = PBS Pro 8.0
#
# Fileserver node info
#
node_fileserver_label = Panasas Fileserver
node_fileserver_order = 2
node_fileserver_count = 1
node_fileserver_purpose = fileserver
node_fileserver_hw_vendor = Panasas
node_fileserver_hw_model = ActiveStor 3050
node_fileserver_hw_cpu_name = --
node_fileserver_hw_ncpuorder = 1-2 chips
node_fileserver_hw_nchips = 1
node_fileserver_hw_ncores = 1
node_fileserver_hw_ncoresperchip = 1
node_fileserver_hw_nthreadspercore = 1
node_fileserver_hw_cpu_char = --
node_fileserver_hw_cpu_mhz = 0
node_fileserver_hw_pcache = None
node_fileserver_hw_scache = None
node_fileserver_hw_tcache = None
node_fileserver_hw_ocache = None
node_fileserver_hw_memory = 1 MB
node_fileserver_hw_disk = 140 disks, 250GB/disk, 35TB total, 7 Shelves
node_fileserver_hw_other = None
node_fileserver_hw_adapter_fs_model = --
node_fileserver_hw_adapter_fs_count = 1
node_fileserver_hw_adapter_fs_slot_type = --
node_fileserver_hw_adapter_fs_data_rate = 1Gbps Ethernet
node_fileserver_hw_adapter_fs_ports_used = 4
node_fileserver_hw_adapter_fs_interconnect = Ethernet
node_fileserver_hw_adapter_fs_driver = --
node_fileserver_hw_adapter_fs_firmware = N/A
node_fileserver_sw_os = 2.3.7.a-195733.1
node_fileserver_sw_localfile = PanFS
node_fileserver_sw_sharedfile = DirectFlow
node_fileserver_sw_state = Multi-User
node_fileserver_sw_other = None
#
# IB interconnect
#
interconnect_ib_label = IB Switch
interconnect_ib_order = 1
interconnect_ib_purpose = MPI traffic
interconnect_ib_hw_vendor = SilverStorm Technologies
interconnect_ib_hw_model = Cisco SFS 7024D
interconnect_ib_hw_switch_9080_model = Cisco SFS 7024D
interconnect_ib_hw_switch_9080_count = 1
interconnect_ib_hw_switch_9080_ports = 288
interconnect_ib_hw_switch_9080_data_rate = InfiniBand 4x DDR
interconnect_ib_hw_switch_9080_firmware = 4.1.1.1.2
#
# Cluster file system interconnect
#
interconnect_fs_label = Gigabit Ethernet
interconnect_fs_order = 2
interconnect_fs_purpose = file system traffic
interconnect_fs_hw_vendor = Cisco
interconnect_fs_hw_model = Cisco Catalyst 4510
interconnect_fs_hw_switch_fs_model = Cisco Catalyst 4510
interconnect_fs_hw_switch_fs_count = 1
interconnect_fs_hw_switch_fs_ports = 332
interconnect_fs_hw_switch_fs_data_rate = 1Gbps Ethernet
interconnect_fs_hw_switch_fs_firmware = N/A
#
# Hardware
#
system_class = Homogeneous
max_ranks = 256
max_peak_ranks = 256
#
# Software
#
sw_c_compiler = Intel C++ Compiler 10.1 for Linux (10.1.004)
sw_cxx_compiler = Intel C++ Compiler 10.1 for Linux (10.1.004)
sw_f_compiler = Intel Fortran Compiler 10.1 for Linux (10.1.004)
sw_auto_parallel = No
sw_base_ptrsize = 64-bit
sw_peak_ptrsize = 64-bit
sw_mpi_library = Intel MPI Library 3.1 for Linux (3.1.020)
sw_mpi_other = None
sw_preprocessors = No
sw_other000 = None
# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
flagsurl000 = http://www.spec.org/mpi2007/flags/EM64T_Intel101_flags.xml
nc000 = SPEC has determined that this result was not in compliance with the SPEC
nc001 = MPI2007 run and reporting rules. Specifically, the result did not meet the
nc002 = requirement for baseline optimization flags to not use assertion flags (the
nc003 = flag -fno-alias is a violation of this rule). The result was found to be
nc004 = performance neutral compared to runs without -fno-alias. Replacement
nc005 = results can be found at http://www.spec.org/mpi2007/results/res2008q4/mpi2007-20080922-00066.html