# Invocation command line:
# /home/hpc2021/bin/harness/runhpc -c oneapi2023_4nodes.cfg --reportable --tune=base --define EXPID=intel.4nodes --define model=omp --define ppn=2 --threads=20 --rank=4 --size=ref --iterations=3 small
# output_root was not used for this run
############################################################################
#!/bin/sh
######################################################################
# Configuration file for the Intel Compilers
#
# Defines: "model" => "mpi", "omp"; default "mpi"
#          "label" => ext base label; default "hpc"
#
#######################################################################
#strict_rundir_verify=0
allow_label_override = yes

expid=
%ifdef %{EXPID}
expid=%{EXPID}
%endif

%ifndef %{label}        # If label is not set, use hpc
%   define label hpc
%endif

%ifndef %{model}        # If model is not set, use mpi
%   define model mpi
%endif

teeout    = yes
makeflags = -j 40

# License and tester information
license_num  = 6569
showtimer    = 0
test_sponsor = Supermicro
tester       = Supermicro

#include: SUTinfo.inc
# ----- Begin inclusion of 'SUTinfo.inc'
############################################################################
######################################################
# Example configuration information for a
# system under test (SUT) Section
######################################################

# General SUT info
system_vendor  = Supermicro
system_name000 = SuperServer SYS-F511E2-RT (Intel Xeon Gold
system_name001 = 5433N)
hw_avail       = Jan-2023
sw_avail       = Jan-2023
prepared_by    = Supermicro

# Compute node info
# [Node_Description: Hardware]
node_compute_syslbl             = SuperServer SYS-F511E2-RT
node_compute_order              = 1
node_compute_count              = 4
node_compute_purpose            = compute
node_compute_hw_vendor          = Supermicro
node_compute_hw_model           = SuperServer SYS-F511E2-RT
node_compute_hw_cpu_name        = Intel Xeon Gold 5433N
node_compute_hw_ncpuorder       = 1 chip
node_compute_hw_nchips          = 1
node_compute_hw_ncores          = 20
node_compute_hw_ncoresperchip   = 20
node_compute_hw_nthreadspercore = 2
node_compute_hw_cpu_char        = Intel Turbo Boost Technology up to 4.1 GHz
node_compute_hw_cpu_mhz         = 1900
node_compute_hw_pcache          = 32 KB I + 48 KB D on chip per core
node_compute_hw_scache          = 2 MB I+D on chip per core
node_compute_hw_tcache          = 37.5 MB I+D on chip per chip
node_compute_hw_ocache          = None
node_compute_hw_memory          = 512 GB (8 x 64 GB 2Rx4 PC5-4800B-R)
node_compute_hw_disk            = 1 x 480 GB Micron M.2 NVMe SSD
node_compute_hw_other           = None

# [Node_Description: Accelerator]
node_compute_hw_accel_model   = None
node_compute_hw_accel_count   = 0
node_compute_hw_accel_vendor  = None
node_compute_hw_accel_type    = None
node_compute_hw_accel_connect = None
node_compute_hw_accel_ecc     = None
node_compute_hw_accel_desc    = None

node_compute_hw_adapter_fs_model        = Supermicro AOC-ATG-i2TM
node_compute_hw_adapter_fs_count        = 1
node_compute_hw_adapter_fs_slot_type    = Advanced I/O Module (AIOM) Form Factor
node_compute_hw_adapter_fs_data_rate    = 10 Gb/s
node_compute_hw_adapter_fs_ports_used   = 1
node_compute_hw_adapter_fs_interconnect = AOC-ATG-i2TM
node_compute_hw_adapter_fs_driver       = None
node_compute_hw_adapter_fs_firmware     = None

# [Node_Description: Software]
node_compute_sw_os000      = SUSE Linux Enterprise Server 15 SP4
node_compute_sw_os001      = Kernel 5.14.21-150400.22-default
node_compute_sw_localfile  = xfs
node_compute_sw_sharedfile = None
node_compute_sw_state      = Multi-user, run level 3
node_compute_sw_other      = None

# [Fileserver]

# [Interconnect]
interconnect_fs_syslbl                 = Supermicro AOC-ATG-i2TM
interconnect_fs_order                  = 0
interconnect_fs_purpose                = MPI Traffic, NFS Access
interconnect_fs_hw_vendor              = None
interconnect_fs_hw_model               = Supermicro AOC-ATG-i2TM
interconnect_fs_hw_switch_fs_model     = None
interconnect_fs_hw_switch_fs_count     = 1
interconnect_fs_hw_switch_fs_ports     = 0
interconnect_fs_hw_topo                = None
interconnect_fs_hw_switch_fs_data_rate = 10 Gb/s
interconnect_fs_hw_switch_fs_firmware  = None

#######################################################################
# End of SUT section
# If this config file were to be applied to several SUTs, edits would
# be needed only ABOVE this point.
######################################################################
# ---- End inclusion of '/home/hpc2021/config/SUTinfo.inc'

sw_compiler000    = C/C++/Fortran: Version 2023.0.0 of
sw_compiler001    = Intel oneAPI Compiler
sw_mpi_library000 = Intel MPI Library for Linux* OS,
sw_mpi_library001 = Version 2021.8.0 Build 20221129
sw_mpi_other      = None
sw_other          = None

notes_000 = MPI startup command:
notes_005 =   mpirun command (mpiexec.hydra) was used to start MPI jobs.

label                = %{label}_%{model}
tune                 = base
output_format        = text
use_submit_for_speed = 1
flagsurl000 = http://www.spec.org/hpc2021/flags/Intel_compiler_flags.2023-06-05.xml

# Compiler Settings
default:
AR      = ar
ARFLAGS = cr
CXX     = mpiicpc -cxx=icpx
CC      = mpiicc -cc=icx
FC      = mpiifort -fc=ifx

system_class = Homogenous

# Compiler Version Flags
CC_VERSION_OPTION  = --version
CXX_VERSION_OPTION = --version
FC_VERSION_OPTION  = --version

default=base=default:
OPTIMIZE    = -Ofast -ipo -xCORE-AVX512 -mprefer-vector-width=512
COPTIMIZE   = -ansi-alias
CXXOPTIMIZE = -ansi-alias
FOPTIMIZE   = -nostandard-realloc-lhs -align array64byte
PORTABILITY = -lstdc++

mpicmd = mpiexec.hydra -bootstrap ssh -hostfile $[top]/hostfile -genv OMP_NUM_THREADS $threads -np $ranks -ppn $ppn $command
submit = $mpicmd
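
# For reference, a sketch of how the submit line above expands with this
# run's values (threads=20, ranks=4, ppn=2, taken from the invocation at
# the top of this file); $[top] resolves to the harness top-level directory:
#
#   mpiexec.hydra -bootstrap ssh -hostfile <top>/hostfile \
#       -genv OMP_NUM_THREADS 20 -np 4 -ppn 2 <benchmark command>
#
# The hostfile is expected to list the four compute nodes, one per line,
# e.g. (hostnames are hypothetical):
#
#   node01
#   node02
#   node03
#   node04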
default=base=default:
ranks   = %{ranks}
threads = %{threads}
ppn     = %{ppn}

%if %{model} eq 'mpi'
pmodel = MPI
%endif

# OpenACC flags
%if %{model} eq 'acc'
pmodel = ACC
OPTIMIZE += -fopenacc -foffload=-lm
%endif

# OpenMP (CPU) flags
%if %{model} eq 'omp'
pmodel = OMP
OPTIMIZE += -fiopenmp
%endif

# OpenMP targeting host flags
%if %{model} eq 'tgt'
pmodel = TGT
OPTIMIZE += -fopenmp
%endif

# OpenMP targeting NVIDIA GPU flags
%if %{model} eq 'tgtnv'
pmodel = TGT
OPTIMIZE += -fopenmp -fopenmp-targets=nvptx64-nvidia-cuda
%endif

# No peak flags set, so make peak use the same flags as base
default=peak=default:
basepeak = 1

513.soma_t=base,peak:
PORTABILITY += -DSPEC_NO_VAR_ARRAY_REDUCE

528.pot3d_t,628.pot3d_s,728.pot3d_m,828.pot3d_l=base,peak:
PORTABILITY += -DSPEC_NO_REORDER
OPTIMIZE    += -Wno-incompatible-function-pointer-types
FOPTIMIZE   += -heap-arrays 32768

# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
notes_submit_000 = mpiexec.hydra -bootstrap ssh -hostfile $[top]/hostfile -genv OMP_NUM_THREADS $threads -np $ranks -ppn $ppn $command
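
# For reference: with model=omp (as used in this run), the effective base
# optimization flags resolve to the following (a sketch assembled from the
# OPTIMIZE lines above):
#
#   -Ofast -ipo -xCORE-AVX512 -mprefer-vector-width=512 -fiopenmp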