# Invocation command line:
# /N/dc2/projects/hpc/lijunj/spec/mpi2007-2.0.1-run/br2p_12/bin/runspec -I --reportable -c intel-esm.cfg -F EM64T_Intel140_flags.xml --ranks=96 -define nc=4 medium
# output_root was not used for this run
############################################################################
# Invocation command line:
# /N/dc2/scratch/huili/mpi2007-2.01-br2/bin/runspec --reportable -c br2_intel -F EM64T_Intel111m_flags.xml --ranks=32 -define hosts=/var/spool/torque/aux//781927 -define CRAY_CORES_NODE=32 -define CORES=32 medium
# output_root was not used for this run
############################################################################
# Invocation command line:
# /N/dc2/scratch/huili/mpi2007-2.01-br2/bin/runspec --reportable -c br2_intel --ranks=32 -define hosts=/var/spool/torque/aux//772360 -define CRAY_CORES_NODE=32 -define CORES=32 medium
# output_root was not used for this run
############################################################################
# Invocation command line:
# runspec --size mref --ranks 32 --config br2_intel.cfg --noreportable --iterations 1 --action run --tune base all
# output_root was not used for this run
############################################################################
version_url = version.txt
action = validate
teeout = yes
env_vars = 1
output_format = all
iterations = 3
tune = base
size = mref
runlist = medium
makeflags = -j 8
license_num = 45
company_name = Indiana University
machine_name = BigRed2 Plus
hw_avail =
sw_avail =
sw_other = None
prepared_by = Junjie Li
CC = cc
CXX = CC
FC = ftn
F77 = ftn
CPP = CC
CPPFLAGS = -E
MPI_INC =
MPI_LIB =
MPI_BIN =
use_submit_for_speed = yes
submit = srun -c %{nc} -n $ranks -q $command
#####################################################################
# Portability flags
#####################################################################
121.pop2=default=default=default:
CPORTABILITY = -DSPEC_MPI_CASE_FLAG
126.lammps=default=default=default:
CXXPORTABILITY =
-DMPICH_IGNORE_CXX_SEEK
127.wrf2=default=default=default:
CPORTABILITY = -DSPEC_MPI_CASE_FLAG -DSPEC_MPI_LINUX
130.socorro=default=default=default:
srcalt=nullify_ptrs
FPORTABILITY = -assume nostd_intent_in
132.zeusmp2=default=default=default:
#################################################################
# Base Optimization flags
#################################################################
default=base:
OPTIMIZE = -O3 -ansi-alias -no-prec-div -ipo -xhost -fp-model fast=2
#OPTIMIZE = -O3 -no-prec-div
#################################################################
# Notes
#################################################################
test_sponsor = Indiana University
license_num = 3440A
tester = Indiana University
hw_avail = Apr-2013
sw_avail = Feb-2017
prepared_by = Indiana University
system_vendor = Cray
interconnect_fs_hw_switch_fs_ports = 108
interconnect_fs_hw_switch_fs_model = Mellanox SX6506
interconnect_fs_hw_switch_fs_firmware = Mellanox SX6506
interconnect_fs_hw_switch_fs_data_rate = 56 Gbps
interconnect_fs_hw_switch_fs_count = 1
system_name = Cray XC30 (Intel Xeon E5-2697 v2)
node_compute_hw_adapter_aries_slot_type = PCIe x16 Gen 3
node_compute_hw_adapter_aries_ports_used = 4
node_compute_hw_adapter_aries_model = Cray Aries
node_compute_hw_adapter_aries_interconnect = Aries
node_compute_hw_adapter_aries_firmware = v004.r091
node_compute_hw_adapter_aries_driver = Proprietary Cray_kgni
node_compute_hw_adapter_aries_data_rate = 126 Gbps
node_compute_hw_adapter_aries_count = 1
interconnect_aries_purpose = MPI traffic
interconnect_aries_order = 4
interconnect_aries_label = Cray Aries
interconnect_aries_hw_vendor = Cray
interconnect_aries_hw_topo = Dragonfly
interconnect_aries_hw_switch_aries_ports = 48
interconnect_aries_hw_switch_aries_model = Cray Aries
interconnect_aries_hw_switch_aries_firmware = v004.r091
interconnect_aries_hw_switch_aries_data_rate = 126 Gb/s
interconnect_aries_hw_switch_aries_count = 144
interconnect_aries_hw_model = Cray
Aries
#
# Compute node info
#
node_compute_label = Big Red II Plus Node
node_compute_order = 1
node_compute_count = 8
node_compute_purpose = compute
node_compute_hw_vendor = Cray
node_compute_hw_model = XC30
node_compute_hw_cpu_name = Intel Xeon E5-2697 v2
node_compute_hw_ncpuorder = 1-2 chips
node_compute_hw_nchips = 2
node_compute_hw_ncores = 24
node_compute_hw_ncoresperchip = 12
node_compute_hw_nthreadspercore = 2
node_compute_hw_cpu_char000 = Intel Turbo Boost Technology disabled,
node_compute_hw_cpu_char001 = Hyper-Threading enabled
node_compute_hw_cpu_mhz = 2700
node_compute_hw_pcache000 = 32 KB I + 32 KB D on chip per core
node_compute_hw_scache = 256 KB I+D on chip per core
node_compute_hw_tcache = 30 MB I+D on chip per chip
node_compute_hw_ocache = None
node_compute_hw_memory000 = 64 GB (8 x 8 GB 2Rx4 PC3-14900R-13, ECC)
node_compute_hw_disk = None
node_compute_hw_other = None
node_compute_hw_adapter_IB_model = Mellanox Technologies MT27500 ConnectX-3
node_compute_hw_adapter_IB_count = 1
node_compute_hw_adapter_IB_slot_type = PCIe x16 Gen 3
node_compute_hw_adapter_IB_data_rate = 40Gbps
node_compute_hw_adapter_IB_ports_used = 1
node_compute_hw_adapter_IB_interconnect = 40 Gigabit Infiniband (QDR)
node_compute_hw_adapter_IB_driver = 1.0-ofed1.5.4.1
node_compute_hw_adapter_IB_firmware = 2.33.5100
node_compute_sw_os000 = SUSE Linux Enterprise Server 11 SP3 (x86_64),
node_compute_sw_os001 = Cray Linux Environment 5.2
node_compute_sw_os002 = 3.0.101-0.46.1_1.0502.8871-cray_ari_c
node_compute_sw_localfile = None
node_compute_sw_sharedfile = Lustre
node_compute_sw_state = Multi-User
node_compute_sw_other = Slurm 15.08.12
#
# Fileserver node info
#
node_fileserver_label = Data Capacitor II
node_fileserver_order = 2
node_fileserver_count = 2
node_fileserver_purpose = fileserver
node_fileserver_hw_vendor = DDN
node_fileserver_hw_model = DDN SFA12K
node_fileserver_hw_cpu_name = Intel Xeon CPU E5-2620
node_fileserver_hw_ncpuorder = 1-2 chips
node_fileserver_hw_nchips =
2
node_fileserver_hw_ncores = 12
node_fileserver_hw_ncoresperchip = 6
node_fileserver_hw_nthreadspercore = 1
node_fileserver_hw_cpu_char = Intel Turbo Boost Technology up to 2.50 GHz
node_fileserver_hw_cpu_mhz = 2000
node_fileserver_hw_pcache = 32 KB I + 32 KB D on chip per core
node_fileserver_hw_scache = 256 KB I+D on chip per core
node_fileserver_hw_tcache = 15 MB I+D on chip per chip
node_fileserver_hw_ocache = None
node_fileserver_hw_memory = 96 GB
node_fileserver_hw_disk000 = 30 TB RAID 6, 10 (8 + 2) x 3 TB SAS
node_fileserver_hw_disk001 = Hitachi HUS724030ALS640, 7200RPM, 6.0Gbps
node_fileserver_hw_other = None
node_fileserver_hw_adapter_fs_model = Mellanox ConnectX MHQH29-XTC
node_fileserver_hw_adapter_fs_count = 1
node_fileserver_hw_adapter_fs_slot_type = PCIe x8 Gen 2
node_fileserver_hw_adapter_fs_data_rate = 40Gbps
node_fileserver_hw_adapter_fs_ports_used = 1
node_fileserver_hw_adapter_fs_interconnect = 40 Gigabit Infiniband (QDR)
node_fileserver_hw_adapter_fs_driver = 1.0-ofed1.5.4.1
node_fileserver_hw_adapter_fs_firmware = 2.9.1000
node_fileserver_sw_os = CentOS 6.2
node_fileserver_sw_localfile = Linux/ext4
node_fileserver_sw_sharedfile = Lustre
node_fileserver_sw_state = Multi-User
node_fileserver_sw_other = None
#
# Gemini interconnect
#
#
# Management 1GE interconnect
#
interconnect_fs_label = Infiniband (QDR)
interconnect_fs_order = 3
interconnect_fs_purpose = Lustre fileserver
interconnect_fs_hw_vendor = DDN
interconnect_fs_hw_model = Mellanox SX6506
interconnect_fs_hw_topo = switched
#
# Hardware
#
system_class = Homogeneous
max_ranks = 512
max_peak_ranks = 512
#
# Software
#
sw_c_compiler000 = Intel C Composer XE 2017 for Linux,
sw_c_compiler001 = Version 17.0.2.174 Build 20170213
sw_cxx_compiler000 = Intel C++ Composer XE 2017 for Linux,
sw_cxx_compiler001 = Version 17.0.2.174 Build 20170213
sw_f_compiler000 = Intel Fortran Composer XE 2017 for Linux,
sw_f_compiler001 = Version 17.0.2.174 Build 20170213
sw_auto_parallel =
sw_base_ptrsize = 64-bit
sw_peak_ptrsize = 64-bit
sw_mpi_library = Cray MPI (MPT) 7.5.0
sw_mpi_other = None
sw_preprocessors = No
sw_other = None
#
# General notes
#
notes_010 = MPI startup command:
notes_015 = srun command was used to start MPI jobs.
notes_020 =
notes_025 = export MPICH_NO_BUFFER_ALIAS_CHECK=true
notes_030 = If set, the buffer alias error check for collectives is
notes_035 = disabled. The MPI standard does not allow aliasing of type
notes_040 = OUT or INOUT parameters on the same collective function
notes_045 = call. The default is false.
notes_050 =
notes_055 = Job placement:
notes_060 = Slurm is used for job placement.
notes_065 = Compute nodes are selected by Slurm.
notes_070 = No specific node selection is used.
# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
flagsurl000 = http://www.spec.org/mpi2007/flags/EM64T_Intel140_flags.20150429.00.xml
notes_005 =
# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
notes_000 = 130.socorro (base): "nullify_ptrs" src.alt was used.
flagsurl000 = http://www.spec.org/mpi2007/flags/EM64T_Intel170_flags.xml
notes_submit_000 = submit = srun -c 4 -n $ranks -q $command