# Invocation command line:
# /vol/benchnfs1/tmp/xinghong/mpi2007/bin/runspec -c impi_11.cfg --size mref --iterations 3 --reportable --define MPIRUN_OPTS=-genv I_MPI_DEVICE rdssm -genv I_MPI_FALLBACK_DEVICE no -perhost 16 --ranks 128 all
# output_root was not used for this run
############################################################################
use_version_url = 1
flagsurl000 = http://www.spec.org/mpi2007/flags/EM64T_Intel111_flags.20090729.xml
ext = impi
makeflags = -j 7
env_vars = 1
basepeak = 1
reportable = 0
ignore_errors = 1
iterations = 1
tune = base
size = mref
output_format = all
use_submit_for_speed = 1
submit = mpiexec %{MPIRUN_OPTS} -np $ranks $command
# submit = mpiexec -genv I_MPI_DEVICE rdssm -genv I_MPI_FALLBACK_DEVICE disable -perhost 8 -n $ranks $command
FC = mpiifort
CC = mpiicc
CXX = mpiicpc
#####################################################################
# Portability flags
#####################################################################
121.pop2=default=default=default:
CPORTABILITY = -DSPEC_MPI_CASE_FLAG
126.lammps=default=default=default:
CXXPORTABILITY = -DMPICH_IGNORE_CXX_SEEK
127.wrf2=default=default=default:
CPORTABILITY = -DSPEC_MPI_CASE_FLAG -DSPEC_MPI_LINUX
#################################################################
# Optimization flags
#################################################################
default=default=default=default:
OPTIMIZE = -O3 -xSSE4.2 -no-prec-div
# For mpi trace
LIBS =
#################################################################
# Notes
#################################################################
test_sponsor = IBM Corporation
license_num = 005
tester = IBM Corporation
test_date = Jun-2009
hw_avail = Apr-2009
sw_avail = Apr-2009
prepared_by = IBM Corporation
system_vendor = IBM Corporation
node_fileserver_hw_adapter_fs_firmware = 5721-v3.29a, ASFIPMI v6.08
node_fileserver_hw_adapter_fs_driver = tg3 3.86
node_compute_hw_adapter_fs_firmware = 4.6.4 NCSI 1.0.6
node_compute_hw_adapter_fs_driver = bnx2 1.7.9-1
interconnect_ib_hw_switch_0_firmware = 1.0.0
interconnect_fs_hw_vendor = Cisco
interconnect_fs_hw_switch_0_ports = 28
interconnect_fs_hw_switch_0_model = WS-C3750G-24TS-E1U
interconnect_fs_hw_switch_0_firmware = 12.2 (35SE5)
interconnect_fs_hw_model = 3750G
interconnect_ib_hw_switch_0_ports = 24
interconnect_ib_hw_switch_0_model = ISR 9024D-M
interconnect_ib_hw_switch_0_data_rate = 4X DDR
interconnect_ib_hw_switch_0_count = 1
interconnect_fs_hw_switch_0_data_rate = Fullduplex 1000
interconnect_fs_hw_switch_0_count = 1
node_compute_hw_adapter_fs_slot_type = PCIe x16 Gen2
node_compute_hw_adapter_fs_ports_used = 1
node_compute_hw_adapter_fs_model000 = Integrated Broadcom Corporation NetXtreme II
node_compute_hw_adapter_fs_model001 = BCM5709 Gigabit Ethernet (rev 20)
node_compute_hw_adapter_fs_interconnect = Ethernet
node_compute_hw_adapter_fs_data_rate = 10/100/1000
node_compute_hw_adapter_fs_count = 1
interconnect_fs_purpose = File system traffic
interconnect_fs_order = 2
interconnect_fs_label = Ethernet
interconnect_fs_hw_topo = Single Switch
sw_parallel_other = None
node_fileserver_sw_sharedfile = NFS
node_fileserver_order = 2
node_fileserver_label = IBM System x346 8840
node_fileserver_hw_tcache = None
node_fileserver_hw_ocache = None
node_fileserver_hw_adapter_fs_slot_type = PCI-X
node_fileserver_hw_adapter_fs_ports_used = 1
node_fileserver_hw_adapter_fs_model000 = Integrated Broadcom Corporation NetXtreme BCM5721
node_fileserver_hw_adapter_fs_model001 = Gigabit Ethernet PCI Express (rev 11)
node_fileserver_hw_adapter_fs_count = 1
system_name000 = System x3550 M2 (Intel Xeon X5570, 2.93 GHz
system_name001 = SMT on, Turbo on)
#
# Computation node info
#
node_compute_label = IBM System x3550 M2
node_compute_order = 1
node_compute_count = 8
node_compute_purpose = compute, head
node_compute_hw_vendor = IBM Corporation
node_compute_hw_model = 7946-92U
node_compute_hw_cpu_name = Intel Xeon X5570
node_compute_hw_ncpuorder = 1-2 chips
node_compute_hw_nchips = 2
node_compute_hw_ncores = 8
node_compute_hw_ncoresperchip = 4
node_compute_hw_nthreadspercore = 2
node_compute_hw_cpu_char000 = Intel Turbo Boost Technology up to 3.33 GHz, 6.4
node_compute_hw_cpu_char001 = GT/s
node_compute_hw_cpu_char002 = QPI, Hyper-Threading enabled
node_compute_hw_cpu_mhz = 2933
node_compute_hw_pcache = 32 KB I + 32 KB D on chip per core
node_compute_hw_scache = 256 KB I+D on chip per core
node_compute_hw_tcache = 8 MB I+D on chip per chip, 8 MB shared / 4 cores
node_compute_hw_ocache = None
node_compute_hw_memory = 24 GB (RDIMM 6x4-GB DDR3-1333 MHz)
node_compute_hw_disk = 73.4GB 10K-rpm SAS SFF Slim-HS 2.5" HDD
node_compute_hw_other = None
node_compute_hw_adapter_ib_model = Mellanox ConnectX
node_compute_hw_adapter_ib_count = 1
node_compute_hw_adapter_ib_slot_type = PCIe x16 Gen2
node_compute_hw_adapter_ib_data_rate = InfiniBand 4x DDR
node_compute_hw_adapter_ib_ports_used = 1
node_compute_hw_adapter_ib_interconnect = InfiniBand
node_compute_hw_adapter_ib_driver = OFED 1.4-20090301-0600
node_compute_hw_adapter_ib_firmware = 2.6.000
node_compute_sw_os = Red Hat EL 5.3, kernel 2.6.18-128.el5
node_compute_sw_localfile = Linux/ext3
node_compute_sw_sharedfile = NFS
node_compute_sw_state = Multi-User
node_compute_sw_other = None
#
# Fileserver node info
#
node_fileserver_count = 1
node_fileserver_purpose = fileserver
node_fileserver_hw_vendor = IBM Corporation
node_fileserver_hw_model = 884045Y
node_fileserver_hw_cpu_name = Intel Xeon EM64T
node_fileserver_hw_ncpuorder = 2 chips
node_fileserver_hw_nchips = 2
node_fileserver_hw_ncores = 2
node_fileserver_hw_ncoresperchip = 1
node_fileserver_hw_nthreadspercore = 1
node_fileserver_hw_cpu_char = 800 MHz FSB
node_fileserver_hw_cpu_mhz = 3600
node_fileserver_hw_pcache = 32 KB I + 32 KB D on chip per core
node_fileserver_hw_scache = 1 MB I+D on chip per chip
node_fileserver_hw_memory = 4 GB
node_fileserver_hw_disk = SCSI
node_fileserver_hw_other = None
node_fileserver_hw_adapter_fs_data_rate = 10/100/1000
node_fileserver_hw_adapter_fs_interconnect = Ethernet
node_fileserver_sw_os = Red Hat EL 5.2, kernel 2.6.18-92.el5
node_fileserver_sw_localfile = None
node_fileserver_sw_state = Multi-User
node_fileserver_sw_other = None
#
# IB interconnect
#
interconnect_ib_label = InfiniBand
interconnect_ib_order = 1
interconnect_ib_purpose = MPI traffic
interconnect_ib_hw_vendor = Voltaire
interconnect_ib_hw_model = 24 4X DDR port Bundle
interconnect_ib_hw_topo = Single Switch
#
# IB interconnect
#
#interconnect_ib_hw_switch_3600_model = XXX Mellanox MTS3600Q-1UNC
#interconnect_ib_hw_switch_3600_count = XXX 9
#interconnect_ib_hw_switch_3600_ports = XXX 324
#interconnect_ib_hw_topo = Fat tree
#interconnect_ib_hw_switch_3600_data_rate = InfiniBand 4x QDR
#interconnect_ib_hw_switch_3600_firmware = 7.1.000
#
# Cluster file system interconnect
#
#
# Hardware
#
system_class = Homogeneous
max_ranks = 128
max_peak_ranks = 128
#
# Software
#
sw_c_compiler = Intel C++ Compiler 11.0.083 for Linux
sw_cxx_compiler = Intel C++ Compiler 11.0.083 for Linux
sw_f_compiler = Intel Fortran Compiler 11.0.083 for Linux
sw_base_ptrsize = 64-bit
sw_peak_ptrsize = 64-bit
sw_mpi_library = Intel MPI Library 3.2.1.009 for Linux
sw_mpi_other = None
sw_preprocessors = No
sw_other000 = Intel MPI Library 3.2.1.009 for Linux
sw_other001 = Multi-Purpose
sw_other002 = Daemon (MPD)
# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
flagsurl000 = http://www.spec.org/mpi2007/flags/EM64T_Intel111_flags.xml
########################################################## ENDHERE #######
# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
notes_submit_000 = submit = mpiexec -genv -np $ranks $command
node_compute_notes_000 = BIOS settings notes:
node_compute_notes_005 = Intel Hyper-Threading Technology (SMT): Enabled
node_compute_notes_010 = Intel Turbo Boost Technology (Turbo) : Enabled
node_compute_notes_015 =
node_compute_notes_020 = RAM configuration notes: Each compute node has
node_compute_notes_025 = 1x4-GB RDIMM on each memory channel.