# Invocation command line:
# /work/bw579735/SPEC/rc-1.0.2/bin/harness/runhpc --reportable --config=rwth_claix -I --action=run --iterations=3 --tune=base --define pmodel=mpi --ranks=768 --define peakranks=704 small
# output_root was not used for this run
############################################################################
######################################################################
# Example configuration file for the Intel Compilers
#
# Defines: "pmodel" => "mpi" or "omp", default "mpi"
#          "label"  => ext base label, default "intel"
#
# MPI-only Command:
# runhpc -c Example_intel --reportable -T base --define pmodel=mpi --ranks=40 small
#
# OpenMP Command:
# runhpc -c Example_intel --reportable -T base --define pmodel=omp --ranks=1 --threads=40 small
#
#######################################################################

%ifndef %{label}          # IF label is not set use intel
%  define label intel
%endif

%ifndef %{pmodel}         # IF pmodel is not set use mpi
%  define pmodel mpi
%endif

teeout = yes
makeflags = -j 10

# Tester Information
license_num  = 055A
test_sponsor = RWTH Aachen University
tester       = RWTH Aachen University

ranks     = %{ranks}
peakranks = %{peakranks}
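# Note added for clarity (not part of the original run): with the invocation
# above, the %ifndef fallbacks are skipped because pmodel is set on the
# command line, and the macros resolve as follows:
#
#   %{label}     -> intel   (default; no --define label given)
#   %{pmodel}    -> mpi     (from --define pmodel=mpi)
#   %{ranks}     -> 768     (from --ranks=768)
#   %{peakranks} -> 704     (from --define peakranks=704)
#
# so the binary label defined in the header section below becomes "intel_mpi".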
######################################################
# SUT Section
######################################################
#include: claix18.inc
# ----- Begin inclusion of 'claix18.inc'
############################################################################
######################################################
# Example configuration information for a
# system under test (SUT) Section
######################################################

# General SUT info
system_vendor = NEC Corporation
system_name   = CLAIX-2018: Intel Compute Module HNS2600BPB (Intel Xeon Platinum 8160)
node_compute_syslbl    = Intel HNS2600BPB
interconnect_fs_syslbl = Intel Omni-Path 100 Series
hw_avail    = Nov-2018
sw_avail    = Sep-2020
prepared_by = Bo Wang, Sandra Wienke

# Computation node info
# [Node_Description: Hardware]
node_compute_label   = None
node_compute_order   = 1
node_compute_count   = 16
node_compute_purpose = compute
node_compute_hw_vendor    = Intel Corporation
node_compute_hw_model     = Intel Compute Module HNS2600BPB
node_compute_hw_cpu_name  = Intel Xeon Platinum 8160
node_compute_hw_ncpuorder = 1-2 chips
node_compute_hw_nchips    = 2
node_compute_hw_ncores    = 48
node_compute_hw_ncoresperchip   = 24
node_compute_hw_nthreadspercore = 1
node_compute_hw_cpu_char = Intel Turbo Boost Technology up to 3.7 GHz
node_compute_hw_cpu_mhz  = 2100
node_compute_hw_pcache   = 32 KB I + 32 KB D on chip per core
node_compute_hw_scache   = 1 MB I+D on chip per core
node_compute_hw_tcache   = 33 MB I+D on chip per chip
node_compute_hw_ocache   = None
node_compute_hw_memory   = 192 GB (12 x 16 GB 2Rx4 PC4-2666V-R)
node_compute_hw_disk     = Intel SSDSC2KG48, 480 GB, SATA
node_compute_hw_other    = None

#[Node_Description: Accelerator]
#node_compute_hw_accel_model   = Tesla V100-NVLINK-16GB
#node_compute_hw_accel_count   = 2 per node
#node_compute_hw_accel_vendor  = NVIDIA Corporation
#node_compute_hw_accel_type    = GPU
#node_compute_hw_accel_connect = NVLINK
#node_compute_hw_accel_ecc     = Yes
#node_compute_hw_accel_desc    = See Notes

node_compute_hw_adapter_fs_model        = Omni-Path HFI Silicon 100 Series
node_compute_hw_adapter_fs_count        = 1
node_compute_hw_adapter_fs_slot_type    = PCI Express Gen3 x16
node_compute_hw_adapter_fs_data_rate    = 100 Gb/s
node_compute_hw_adapter_fs_ports_used   = 1
node_compute_hw_adapter_fs_interconnect = Omni-Path
node_compute_hw_adapter_fs_driver       = ib_ipoib 1.0.0
node_compute_hw_adapter_fs_firmware     = 1.27.0

#[Node_Description: Software]
node_compute_sw_os000      = CentOS Linux release 7.9.2009
node_compute_sw_localfile  = xfs
node_compute_sw_sharedfile = 1.4 PB NFS (Concat EMC Isilon X410) over Omni-Path
node_compute_sw_state      = Multi-user, run level 3
node_compute_sw_other      = None

#[Fileserver]
node_fileserver_count = 15
node_fileserver_label = NFS

#[Interconnect]
interconnect_fs_order     = 0
interconnect_fs_purpose   = MPI Traffic
interconnect_fs_hw_vendor = Intel
interconnect_fs_hw_model  = Edge Switch 100 Series
interconnect_fs_hw_switch_fs_model000 = BI 100 Series 48 Port 2
interconnect_fs_hw_switch_fs_model001 = PSU
interconnect_fs_hw_switch_fs_count    = 48
interconnect_fs_hw_switch_fs_ports    = 48
interconnect_fs_hw_topo               = Fat tree
interconnect_fs_hw_switch_fs_data_rate = 100 Gb/s
interconnect_fs_hw_switch_fs_firmware  = 10.8.2.0.6

#######################################################################
# End of SUT section
# If this config file were to be applied to several SUTs, edits would
# be needed only ABOVE this point.
######################################################################
# ---- End inclusion of '/work/bw579735/SPEC/rc-1.0.2/config/claix18.inc'

#[Software]
sw_compiler000 = C/C++/Fortran:
sw_compiler001 = Intel Compilers for Linux 2021.3.0
sw_mpi_library = Intel MPI Library for Linux 2018.4.274
sw_mpi_other   = None
system_class   = Homogeneous
sw_other       = None

#[General notes]
notes_000 =
notes_005 = The test sponsor attests, as of date of publication,
notes_010 = that CVE-2017-5754 (Meltdown) is mitigated in the system as tested and documented.
notes_015 = The test sponsor attests, as of date of publication,
notes_020 = that CVE-2017-5753 (Spectre variant 1) is mitigated in the system as tested and documented.
notes_025 = The test sponsor attests, as of date of publication,
notes_030 = that CVE-2017-5715 (Spectre variant 2) is mitigated in the system as tested and documented.
notes_035 =
notes_040 = This benchmark result is intended to provide perspective on
notes_045 = past performance using the historical hardware and/or
notes_050 = software described on this result page.
notes_055 =
notes_060 = The system as described on this result page was formerly
notes_065 = generally available. At the time of this publication, it may
notes_070 = not be shipping, and/or may not be supported, and/or may fail
notes_075 = to meet other tests of General Availability described in the
notes_080 = SPEC HPG Policy document, http://www.spec.org/hpg/policy.html
notes_085 =
notes_090 = This measured result may not be representative of the result
notes_095 = that would be measured were this benchmark run with hardware
notes_100 = and software available as of the publication date.
notes_105 =

flagsurl000 = http://www.spec.org/hpc2021/flags/RWTH-Aachen-CLAIX.xml

#######################################################################
# End of SUT section
######################################################################
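# Sizing note added for clarity (not part of the original file): the SUT has
# 16 compute nodes x 48 cores = 768 cores with SMT disabled, matching the
# base run's --ranks=768 (one MPI rank per core). The peak override further
# below uses --define peakranks=704, i.e. 44 ranks per node, which presumably
# leaves 4 cores per node unused by MPI ranks.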
######################################################################
# The header section of the config file.
# Must appear before any instances of "section markers" (see below)
#
# ext  = how the binaries you generated will be identified
# tune = specify "base" or "peak" or "all"
label         = %{label}_%{pmodel}
tune          = base,peak
output_format = text,csv
use_submit_for_speed = 1

# Compiler Settings
default:
CC  = ${MPICC}
CXX = ${MPICXX}
FC  = ${MPIFC}

# Compiler Version Flags
CC_VERSION_OPTION  = -V
CXX_VERSION_OPTION = -V
FC_VERSION_OPTION  = -V

# MPI options and binding environment, dependent upon Model being run
# Adjust to match your system

# OpenMP (CPU) Settings
%if %{pmodel} eq 'omp'
preENV_OMP_PROC_BIND=close
preENV_OMP_PLACES=cores
%endif

# preENV_I_MPI_DEBUG=10
# preENV_I_MPI_TUNING_BIN=$WORK/spec/hpc2021-brc2/result/tuning_results_718.dat
#preENV_I_MPI_TUNING_MODE=auto
#preENV_I_MPI_TUNING_BIN_DUMP=./718_tuning_results_close.dat
#preENV_PSM2_MULTI_EP=0
# MPIRUN_OPTS = --bind-to none

# Default is spread binding, set to compact binding
#preENV_I_MPI_PIN_PROCESSOR_LIST=allcores:map=bunch
default=default=default:
MPIRUN_OPTS = --cpu-bind=map_cpu:0,1,2,6,7,8,12,13,14,18,19,20,3,4,5,9,10,11,15,16,17,21,22,23,24,25,26,30,31,32,36,37,38,42,43,44,27,28,29,33,34,35,39,40,41,45,46,47
submit = srun ${MPIRUN_OPTS} -n $ranks $command

#######################################################################
# Optimization
# Note that SPEC baseline rules require that all uses of a given compiler
# use the same flags in the same order. See the SPEChpc Run Rules
# for more details
# http://www.spec.org/hpc2021/Docs/runrules.html
#
# OPTIMIZE    = flags applicable to all compilers
# FOPTIMIZE   = flags applicable to the Fortran compiler
# COPTIMIZE   = flags applicable to the C compiler
# CXXOPTIMIZE = flags applicable to the C++ compiler
#
# See your compiler manual for information on the flags available
# for your compiler

# Compiler flags applied to all models
default=default=default:
OPTIMIZE    =
COPTIMIZE   = -O3 -ansi-alias -ipo
CXXOPTIMIZE = -O3 -ansi-alias -ipo
FOPTIMIZE   = -O3 -ipo -no-prec-div

# OpenMP (CPU) flags
%if %{pmodel} eq 'omp'
pmodel = OMP
OPTIMIZE += -qopenmp
%endif

# No peak flags set, so make peak use the same flags as base

#######################################################################
# Portability
#######################################################################
513.soma_t,613.soma_s,713.soma_m=default=default:
PORTABILITY += -DSPEC_NO_VAR_ARRAY_REDUCE

718.tealeaf_m,719.clvleaf_m,728.pot3d_m,734.hpgmgfv_m=peak=default:
MPIRUN_OPTS = --cpu-bind=map_cpu:0,1,2,6,7,8,12,13,14,18,19,20,3,4,5,9,10,11,15,16,17,21,22,23,24,25,26,30,31,32,36,37,38,42,43,44,27,28,29,33,34,35,39,40,41,45,46,47
submit = srun ${MPIRUN_OPTS} -n ${peakranks} $command

# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
notes_submit_000 = MPI startup command:
notes_submit_005 = srun command was used to start MPI jobs
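# Illustration added for clarity (not part of the original file): after the
# harness substitutes its variables, the base submit line expands to roughly
#
#   srun --cpu-bind=map_cpu:0,1,2,6,7,8,... -n 768 <benchmark command>
#
# and the peak override for 718/719/728/734 to
#
#   srun --cpu-bind=map_cpu:0,1,2,6,7,8,... -n 704 <benchmark command>
#
# The map_cpu list enumerates all 48 core IDs of a node, cores 0-23 and then
# 24-47 (presumably the two sockets), reordered into groups of three so that
# consecutive ranks land on adjacent cores (0-2, 6-8, 12-14, 18-20, then
# 3-5, 9-11, and so on).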