# Invocation command line: # /scratch1/07893/junjieli/mpix/v1.0.2/bin/harness/runhpc --reportable -c intel --size=ref --ranks=1024 --define model=omp --threads=27 --define ppn=2 large --define extra=impi_n512_r1024_t27 --expid=intel-large # output_root was not used for this run ############################################################################ ###################################################################### # Example configuration file for the Intel Compilers # # Defines: "model" => "mpi", "omp" default "mpi" # "label" => ext base label, default "intel" # # MPI-only Command: # runhpc -c Example_intel --reportable -T base --define model=mpi --ranks=40 small # # OpenMP Command: # runhpc -c Example_intel --reportable -T base --define model=omp --ranks=1 --threads=40 small # ####################################################################### %ifndef %{label} # IF label is not set use intel % define label intel %endif %ifndef %{model} # IF model is not set use mpi % define model mpi %endif #expid=intel teeout = yes makeflags=-j 40 #flagsurl=$[top]/intel_flags.xml # Tester Information license_num = 6340 test_sponsor = Texas Advanced Computing Center tester = Texas Advanced Computing Center ###################################################### # SUT Section ###################################################### #include: Example_SUT.inc # ----- Begin inclusion of 'Example_SUT.inc' ############################################################################ ###################################################### # Example configuration information for a # system under test (SUT) Section ###################################################### # General SUT info system_vendor = Dell Inc. 
system_name = Frontera: PowerEdge C6420 (Intel Xeon Platinum 8280) node_compute_hw_adapter_ib_slot_type = PCIe 3.0 x16 node_compute_hw_adapter_ib_ports_used = 1 node_compute_hw_adapter_ib_model = NVIDIA ConnectX-6 VPI Infiniband Adapter Card node_compute_hw_adapter_ib_interconnect = Infiniband HDR100 node_compute_hw_adapter_ib_firmware = 20.25.7020 node_compute_hw_adapter_ib_driver = 5.1-2.5.8.0 node_compute_hw_adapter_ib_data_rate = 100Gb/s node_compute_hw_adapter_ib_count = 1 interconnect_ib_syslbl = InfiniBand interconnect_ib_purpose = MPI traffic and Lustre access interconnect_ib_order = 1 interconnect_ib_hw_vendor = NVIDIA interconnect_ib_hw_topo = Fat Tree (blocking factor 22:18) interconnect_ib_hw_switch_chassis_ports = 40 interconnect_ib_hw_switch_chassis_firmware = 27.2008.2102 interconnect_ib_hw_switch_chassis_data_rate = 200 Gb/s interconnect_ib_hw_switch_chassis_count = 202 interconnect_ib_hw_switch_central_ports = 600 interconnect_ib_hw_switch_central_firmware = 27.2000.1386 interconnect_ib_hw_switch_central_data_rate = 200 Gb/s interconnect_ib_hw_switch_central_count = 6 interconnect_ib_hw_model = NVIDIA Infiniband HDR hw_avail = Jun-2019 sw_avail = Dec-2020 prepared_by = Junjie Li # Computation node info # [Node_Description: Hardware] node_compute_syslbl = PowerEdge C6420 node_compute_order = 1 node_compute_count = 512 node_compute_purpose = compute node_compute_hw_vendor = Dell Inc. 
node_compute_hw_model = PowerEdge C6420 node_compute_hw_cpu_name = Intel Xeon Platinum 8280 node_compute_hw_ncpuorder = 1 chips node_compute_hw_nchips = 2 node_compute_hw_ncores = 56 node_compute_hw_ncoresperchip = 28 node_compute_hw_nthreadspercore = 1 node_compute_hw_cpu_char = Turbo up to 4.0 GHz node_compute_hw_cpu_mhz = 2700 node_compute_hw_pcache = 32 KB I + 32 KB D on chip per core node_compute_hw_scache = 1 MB I+D on chip per core node_compute_hw_tcache000 = 38.5 MB I+D on chip per chip node_compute_hw_ocache = None node_compute_hw_memory = 192 GB (12 x 16 GB 2Rx8 PC4-2933Y-R) node_compute_hw_disk = 1x 240GB SATA SSD node_compute_hw_other = None #[Node_Description: Accelerator] node_compute_hw_accel_model = node_compute_hw_accel_ecc = node_compute_hw_accel_desc = #[Node_Description: Software] node_compute_sw_os000 = CentOS Linux release 7.8.2003 node_compute_sw_os001 = 3.10.0-1127.19.1.el7.x86_64 node_compute_sw_localfile = xfs node_compute_sw_sharedfile = 10.6 PB Lustre (DDN SFA18K) over Infiniband HDR100 node_compute_sw_state = Multi-user, run level 3 node_compute_sw_other = None #[Fileserver] #[Interconnect] ####################################################################### # End of SUT section # If this config file were to be applied to several SUTs, edits would # be needed only ABOVE this point. 
###################################################################### # ---- End inclusion of '/scratch1/07893/junjieli/mpix/v1.0.2/config/Example_SUT.inc' #[Software] sw_compiler000 = C/C++/Fortran: Version 2020 Update 4 of sw_compiler001 = Intel Compilers for Linux sw_mpi_library = Intel MPI Library 2019 Update 9 for Linux sw_mpi_other = None system_class = Homogenous Cluster sw_other = None #[General notes] notes_000 =Environment settings: notes_005 = ulimit -s unlimited notes_010 = ####################################################################### # End of SUT section ###################################################################### ###################################################################### # The header section of the config file. Must appear # before any instances of "section markers" (see below) # # ext = how the binaries you generated will be identified # tune = specify "base" or "peak" or "all" #%ifdef %{extra} define e_label %{extra} #%endif #%ifdef %{profiler} define p_label %{profiler} #%endif %ifdef %{extra} %ifdef %{profiler} label = %{label}_%{model}_%{extra}_%{profiler} %else label = %{label}_%{model}_%{extra} %endif %else %ifdef %{profiler} label = %{label}_%{model}_%{profiler} %else label = %{label}_%{model} %endif %endif tune = base output_format = text use_submit_for_speed = 1 iterations = 3 # Compiler Settings default: %ifndef %{profiler} CC = mpiicc CXX = mpiicpc FC = mpiifort %elif %{profiler} eq 'aps' CC = mpiicc CXX = mpiicpc FC = mpiifort %elif %{profiler} eq 'scorep' CC = scorep --mpp=mpi --nocompiler mpiicc CXX = scorep --mpp=mpi --nocompiler mpiicpc FC = scorep --mpp=mpi --nocompiler mpiifort %endif # Compiler Version Flags CC_VERSION_OPTION = -V -c CXX_VERSION_OPTION = -V -c FC_VERSION_OPTION = -V -c # MPI options and binding environment, dependent upon Model being run # Adjust to match your system # OpenMP (CPU) Settings #%if %{model} eq 'omp' #preENV_OMP_PROC_BIND=true 
#preENV_OMP_PLACES=0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39 #%endif #MPIRUN_OPTS = --bind-to none %ifndef %{profiler} %ifndef %{ppn} submit = timeout 60m mpirun -np $ranks $command %else submit = timeout 60m mpirun -np $ranks -ppn %{ppn} $command %endif %elif %{profiler} eq 'aps' %ifndef %{ppn} submit = timeout 60m mympirun $ranks 1 $command %else submit = timeout 60m mympirun $ranks $threads $command %endif %endif ####################################################################### # Optimization # Note that SPEC baseline rules require that all uses of a given compiler # use the same flags in the same order. See the SPEChpc Run Rules # for more details # http://www.spec.org/hpc2021/Docs/runrules.html # # OPTIMIZE = flags applicable to all compilers # FOPTIMIZE = flags applicable to the Fortran compiler # COPTIMIZE = flags applicable to the C compiler # CXXOPTIMIZE = flags applicable to the C++ compiler # # See your compiler manual for information on the flags available # for your compiler # Compiler flags applied to all models default=base=default: #OPTIMIZE = #COPTIMIZE = -O3 -xhost -no-prec-div -ansi-alias -ipo #-std=gnu11 #CXXOPTIMIZE = -O3 -xhost -no-prec-div -ansi-alias -ipo -std=c++14 #FOPTIMIZE = -O3 -xhost -no-prec-div -ipo #OPTIMIZE = -O3 -no-prec-div -fp-model fast=2 -xCORE-AVX512 -ipo -qopt-zmm-usage=high OPTIMIZE = -O3 -no-prec-div -fp-model fast=2 -xCORE-AVX512 -ipo COPTIMIZE = -ansi-alias CPORTABILITY = -std=gnu11 CXXOPTIMIZE = -ansi-alias CXXPORTABILITY = -std=c++11 %if %{model} eq 'mpi' pmodel=MPI %endif # OpenMP (CPU) flags %if %{model} eq 'omp' pmodel=OMP OPTIMIZE += -qopenmp %endif # No peak flags set, so make peak use the same flags as base default=peak=default: basepeak=1 ####################################################################### # Portability ####################################################################### 513.soma_t,613.soma_s=default=default: 
PORTABILITY += -DSPEC_NO_VAR_ARRAY_REDUCE #PORTABILITY += -std=c99 -DSPEC_NO_VAR_ARRAY_REDUCE #618.tealeaf_s,718.tealeaf_m=default=default: #CPORTABILITY += -std=c99 #CXXPORTABILITY += -std=c++11 # The following section was added automatically, and contains settings that # did not appear in the original configuration file, but were added to the # raw file after the run. default: flagsurl000 = http://www.spec.org/hpc2021/flags/Intel-ic2021-official-linux64_revA.2021-10-20.00.xml interconnect_ib_hw_switch_central_model000 = Quantum CS8500 HDR Modular Switch interconnect_ib_hw_switch_chassis_model000 = Quantum QM8790 HDR Edge Switch notes_submit_000 = mpirun -np $ranks -ppn 2 $command interconnect_ib_notes_000=Full HDR connectivity between switches and HDR100 interconnect_ib_notes_005=connectivity to the compute nodes. Half of nodes interconnect_ib_notes_010=in a rack (44) connect to 22 downlinks of a interconnect_ib_notes_015=chassis switch as pairs of HDR100 links into interconnect_ib_notes_020=HDR200 ports of the chassis switch. The other 18 interconnect_ib_notes_025=ports are uplinks to the six central switches.