# Invocation command line: # /home/root/specmpi2007/bin/runspec --config=Narvi.cfg --size=mref --tune=all --reportable --rank=112 --iteration=3 medium # output_root was not used for this run ############################################################################ ############################################################################ ##################################################################### # # Config file to run SPEC MPI2007 with Intel Software Toolchain # (Intel Compiler 19.1.2.254 and Intel MPI Library 2020 Update 2) # ##################################################################### env_vars=1 basepeak=1 reportable=1 ignore_errors=0 iterations=3 makeflags= -j 8 tune=base size=mref use_version_url=1 use_submit_for_speed =1 output_format= all FC= mpiifort CC= mpiicc CXX= mpiicpc #include: Proton8S.inc # ----- Begin inclusion of 'Proton8S.inc' ############################################################################ ################################################################# # Notes ################################################################# test_sponsor = Lenovo Global Technology license_num = 28 tester = Lenovo Global Technology test_date = Aug-2018 hw_avail = Oct-2020 sw_avail = Oct-2020 prepared_by = Lenovo Global Technology system_vendor = Lenovo Global Technology node_fileserver_sw_state = Multi-User, run level 3 node_fileserver_sw_sharedfile = NFS node_fileserver_sw_other = None node_fileserver_sw_os = SUSE Linux Enterprise Server 15 SP2 node_fileserver_sw_localfile = None node_fileserver_purpose = Fileserver node_fileserver_order = 2 node_fileserver_label = NFS node_fileserver_hw_vendor = Lenovo Global Technology node_fileserver_hw_tcache = 39424 KB I+D on chip per chip node_fileserver_hw_scache = 1 MB I+D on chip per core node_fileserver_hw_pcache = 32 KB I + 32 KB D on chip per core node_fileserver_hw_other = None node_fileserver_hw_ocache = None node_fileserver_hw_nthreadspercore = 1 node_fileserver_hw_ncpuorder = 2,4 chips 
node_fileserver_hw_ncoresperchip = 28 node_fileserver_hw_ncores = 112 node_fileserver_hw_nchips = 4 node_fileserver_hw_model = ThinkSystem SR860 V2 node_fileserver_hw_memory = 1536 GB (48 x 32 GB 2Rx8 PC4-3200AA-R) node_fileserver_hw_disk = 1 x 1 TB SATA 2.5" SSD node_fileserver_hw_cpu_name = Intel Xeon Platinum 8380H node_fileserver_hw_cpu_mhz = 2900 node_fileserver_hw_cpu_char = Intel Turbo Boost Technology up to 4.3 GHz node_fileserver_hw_adapter_fs_slot_type = PCI-Express 3.0 x16 node_fileserver_hw_adapter_fs_ports_used = 1 node_fileserver_hw_adapter_fs_model = Nvidia Mellanox ConnectX-6 Series node_fileserver_hw_adapter_fs_interconnect = Nvidia Mellanox ConnectX-6 node_fileserver_hw_adapter_fs_firmware = 20.25.2006 node_fileserver_hw_adapter_fs_driver = 5.1-0.6.6 node_fileserver_hw_adapter_fs_data_rate = 200 Gb/s node_fileserver_hw_adapter_fs_count = 1 node_fileserver_count = 1 system_name000 = ThinkSystem SR860 V2 system_name001 = (Intel Xeon Platinum 8380H CPU, 2.90 GHz) # # Computation node info [Node_Description: Hardware] # node_compute_label = ThinkSystem SR860 V2 node_compute_order = 1 node_compute_count = 1 node_compute_purpose = compute node_compute_hw_vendor = Lenovo Global Technology node_compute_hw_model = ThinkSystem SR860 V2 node_compute_hw_cpu_name = Intel Xeon Platinum 8380H node_compute_hw_ncpuorder = 2,4 chips node_compute_hw_nchips = 4 node_compute_hw_ncores = 112 node_compute_hw_ncoresperchip = 28 node_compute_hw_nthreadspercore = 1 node_compute_hw_cpu_char = Intel Turbo Boost Technology up to 4.3 GHz node_compute_hw_cpu_mhz = 2900 node_compute_hw_pcache = 32 KB I + 32 KB D on chip per core node_compute_hw_scache = 1 MB I+D on chip per core node_compute_hw_tcache = 39424 KB I+D on chip per chip node_compute_hw_ocache = None node_compute_hw_memory = 1536 GB (48 x 32 GB 2Rx8 PC4-3200AA-R) node_compute_hw_disk = 1 x 1 TB SATA 2.5" SSD node_compute_hw_other = N/A # #[Node_Description: Software] # node_compute_hw_adapter_fs_model = Nvidia 
Mellanox ConnectX-6 HDR Infiniband node_compute_hw_adapter_fs_count = 1 node_compute_hw_adapter_fs_slot_type = PCI-Express 3.0 x16 node_compute_hw_adapter_fs_data_rate = 200 Gb/s node_compute_hw_adapter_fs_ports_used = 1 node_compute_hw_adapter_fs_interconnect = Nvidia Mellanox ConnectX-6 HDR Infiniband node_compute_hw_adapter_fs_driver = 5.1-0.6.6 node_compute_hw_adapter_fs_firmware = 20.25.2006 node_compute_sw_os000 = SUSE Linux Enterprise Server 15 SP2 node_compute_sw_os001 = 5.3.18-22-default node_compute_sw_localfile = xfs node_compute_sw_sharedfile = None node_compute_sw_state = Multi-user, run level 3 node_compute_sw_other = None # # Cluster file system interconnect # interconnect_fs_label = Nvidia Mellanox ConnectX-6 HDR Infiniband interconnect_fs_order = 0 interconnect_fs_purpose = MPI and I/O traffic interconnect_fs_hw_vendor = Nvidia interconnect_fs_hw_model = Nvidia Mellanox ConnectX-6 HDR Infiniband interconnect_fs_hw_switch_fs_model = Nvidia Mellanox QM8700 interconnect_fs_hw_switch_fs_count = 1 interconnect_fs_hw_switch_fs_ports = 40 interconnect_fs_hw_topo = Mesh interconnect_fs_hw_switch_fs_data_rate = 200 Gb/s interconnect_fs_hw_switch_fs_firmware = 3.9.0606 # # Hardware # system_class = Homogeneous max_ranks = 28 max_peak_ranks = N/A # # Software # sw_c_compiler000= Intel Parallel Studio C Compiler 20 Update 2 for sw_c_compiler001 = Linux sw_c_compiler002 = Version 19.1.2.254 Build 20200623 sw_cxx_compiler000= Intel Parallel Studio C++ Compiler 20 Update 2 sw_cxx_compiler001 = for Linux sw_cxx_compiler002 = Version 19.1.2.254 Build 20200623 sw_f_compiler000= Intel Parallel Studio Fortran Compiler 20 Update sw_f_compiler001 = 2 for Linux sw_f_compiler002 = Version 19.1.2.254 Build 20200623 sw_auto_parallel = sw_base_ptrsize = 64-bit sw_peak_ptrsize = Not Applicable sw_mpi_library000 = Intel Parallel Studio MPI Library for Linux* OS sw_mpi_library001 = Version 2020 Update 2 Build 20200624 sw_mpi_other = None sw_preprocessors = No sw_other = 
None # # General notes # notes_010 = notes_015 = RAM configuration: notes_020 = Compute nodes have 2 x 32 GB RDIMM on each memory channel. notes_025 = notes_030 = BIOS settings: notes_035 = Operating Mode : Maximum Performance Mode notes_040 = Intel Hyper-Threading Technology (SMT): Disabled notes_045 = SNC (Sub-NUMA Cluster): Enabled notes_050 = notes_055 = notes_060 = Yes: The test sponsor attests, as of date of publication, notes_065 = that CVE-2017-5754 (Meltdown) is mitigated in the system as tested and documented. notes_070 = notes_075 = Yes: The test sponsor attests, as of date of publication, notes_080 = that CVE-2017-5753 (Spectre variant 1) is mitigated in the system as tested and documented. notes_085 = notes_090 = Yes: The test sponsor attests, as of date of publication, notes_095 = that CVE-2017-5715 (Spectre variant 2) is mitigated in the system as tested and documented. notes_100 = # ---- End inclusion of '/home/root/NFS/config/Proton8S.inc' ##################################################################### # Portability flags ##################################################################### 113.GemsFDTD=peak=default=default: ranks=224 121.pop2=default=default=default: CPORTABILITY = -DSPEC_MPI_CASE_FLAG 126.lammps=default=default=default: CXXPORTABILITY = -DMPICH_IGNORE_CXX_SEEK 127.wrf2=default=default=default: CPORTABILITY = -DSPEC_MPI_CASE_FLAG -DSPEC_MPI_LINUX 129.tera_tf=default=default=default: srcalt=add_rank_support 130.socorro=default=default=default: #srcalt=nullify_ptrs FPORTABILITY = -assume nostd_intent_in 143.dleslie=default=default=default: #srcalt=integer_overflow ################################################################# # Optimization flags ################################################################# default=default=default=default: OPTIMIZE = -O3 -ipo -xCORE-AVX512 -no-prec-div submit = mpiexec -hosts 192.168.99.174 -genv I_MPI_FABRICS shm:ofi -genv I_MPI_COMPATIBILITY=3 -genv I_MPI_HYDRA_PMI_CONNECT=alltoall -np 
$ranks $command # The following section was added automatically, and contains settings that # did not appear in the original configuration file, but were added to the # raw file after the run. default: notes_000 = MPI startup command: notes_005 = mpiexec command was used to start MPI jobs. flagsurl000 = http://www.spec.org/mpi2007/flags/EM64T_Intel121_flags.20201007.xml flagsurl001 = http://www.spec.org/mpi2007/flags/Lenovo-SPECmpiM_Platform_Flags.xml