# Invocation command line: # /home/root/specmpi2007/bin/runspec --config=2Ns_lenovo_AOCC23_orte_HPCX28_O410_fast_lib.cfg --size=mref --tune=all --reportable --rank=512 --iterations=3 medium # output_root was not used for this run ############################################################################ ##################################################################### # # Config file to run SPEC MPI2007 with Intel Software Toolchain # (Intel Compiler 17.0.2 and Intel MPI 17.1.132) # ##################################################################### env_vars=1 #basepeak=1 reportable=1 ignore_errors=0 iterations=3 makeflags= -j 8 tune=base size=mref use_version_url=1 use_submit_for_speed =1 output_format= all CC = mpicc CXX = mpicxx FC = mpifort ext=OMPI410_AOCC23_orte_HPCX28_fast_lib ##################################################################### # Portability flags ##################################################################### 121.pop2=default=default=default: CPORTABILITY = -DSPEC_MPI_CASE_FLAG 126.lammps=default=default=default: CXXPORTABILITY = -DMPICH_IGNORE_CXX_SEEK 127.wrf2=default=default=default: #CPORTABILITY = -DSPEC_MPI_CASE_FLAG -DSPEC_MPI_LINUX CPORTABILITY = -DSPEC_MPI_CASE_FLAG -DSPEC_MPI_LINUX -Wno-return-type 129.tera_tf=default=default=default: #srcalt=add_rank_support 130.socorro=default=default=default: #srcalt=nullify_ptrs ##FPORTABILITY = -assume nostd_intent_in 143.dleslie=default=default=default: #srcalt=integer_overflow 104.milc=peak=default=default: basepeak=yes 107.leslie3d=peak=default=default: basepeak=yes 113.GemsFDTD=peak=default=default: ranks=128 115.fds4=peak=default=default: basepeak=yes 121.pop2=peak=default=default: basepeak=yes 122.tachyon=peak=default=default: basepeak=yes 126.lammps=peak=default=default: basepeak=yes 127.wrf2=peak=default=default: basepeak=yes 128.GAPgeofem=peak=default=default: basepeak=yes 129.tera_tf=peak=default=default: basepeak=yes #ranks=512 130.socorro=peak=default=default: 
basepeak=yes 132.zeusmp2=peak=default=default: basepeak=yes 137.lu=peak=default=default: basepeak=yes ################################################################# # Optimization flags ################################################################# default=default=default=default: #OPTIMIZE = -Ofast -flto -ffast-math -march=znver2 -mno-avx2 #-fuse-ld=ld COPTIMIZE = -Ofast -flto -ffast-math -march=znver2 -mno-avx2 CXXOPTIMIZE = -Ofast -flto -ffast-math -march=znver2 -mno-avx2 FOPTIMIZE = -Ofast -flto -ffast-math -march=znver2 -mno-avx2 -funroll-loops EXTRA_LIBS = -L/home/amd-libm/lib -lamdlibm -lm EXTRA_FLIBS = -L/home/amd-libm/lib -lamdlibm -lm #submit = mpiexec -hosts localhost -genv I_MPI_PROVIDER psm2 -genv I_MPI_FALLBACK 1 -genv I_MPI_COMPATIBILITY=3 -genv I_MPI_HYDRA_PMI_CONNECT=alltoall -n $ranks $command #1N AOCC2.1 OK #submit = /home/HPC-X/hpcx-v2.5.0-gcc-MLNX_OFED_LINUX-4.7-1.0.0.1-redhat8.1-x86_64/OMPI402_AOCC21_ucx_knem/bin/mpirun --allow-run-as-root --oversubscribe -np $ranks $command #submit = /home/HPC-X/hpcx-v2.5.0-gcc-MLNX_OFED_LINUX-4.7-1.0.0.1-redhat8.1-x86_64/OMPI402_AOCC21_ucx_knem/bin/mpirun --allow-run-as-root -host 192.168.99.161,192.168.99.185 --oversubscribe -mca coll_hcoll_enable 1 -x HCOLL_MAIN_IB=mlx5_0:1 -mca pml ucx -x UCX_TLS=sm,rc_x -np $ranks $command ## 2Ns #submit = /home/HPC-X/hpcx-v2.5.0-gcc-MLNX_OFED_LINUX-4.7-1.0.0.1-redhat8.1-x86_64/OMPI402_AOCC21_ucx_knem/bin/mpirun --allow-run-as-root -host 192.168.99.161,192.168.99.185 --oversubscribe -mca coll_hcoll_enable 1 -x HCOLL_MAIN_IB=mlx5_0:1 -mca pml ucx -x UCX_TLS=sm,dc_x -x UCX_DC_MLX5_NUM_DCI=16 --map-by numa -np $ranks $command #submit = mpirun --allow-run-as-root -host 192.168.99.185:128,192.168.99.166:128 --oversubscribe -mca coll_hcoll_enable 1 -x HCOLL_MAIN_IB=mlx5_0:1 -mca pml ucx -x UCX_TLS=sm,dc_x --map-by numa -np $ranks $command # -x UCX_DC_MLX5_NUM_DCI=15 submit = mpirun --allow-run-as-root --oversubscribe -hostfile 
/home/root/specmpi2007/config/4nodes -mca coll_hcoll_enable 1 -x HCOLL_MAIN_IB=mlx5_0:1 -mca pml ucx -x UCX_TLS=sm,dc_x --map-by core --bind-to core -np $ranks $command ## 1N #submit = /home/HPC-X/hpcx-v2.5.0-gcc-MLNX_OFED_LINUX-4.7-1.0.0.1-redhat8.1-x86_64/OMPI402_AOCC21_ucx_knem/bin/mpirun --allow-run-as-root -x HCOLL_MAIN_IB=mlx5_0:1 --map-by numa -np $ranks $command # --bind-to none --oversubscribe # The following section was added automatically, and contains settings that # did not appear in the original configuration file, but were added to the # raw file after the run. default: flagsurl000 = http://www.spec.org/mpi2007/flags/AMD_flags.20210315.xml sw_base_ptrsize = 64-bit sw_peak_ptrsize = Not Applicable sw_other = None sw_mpi_other = None sw_preprocessors = No test_sponsor = Lenovo Global Technology license_num = 28 tester = Lenovo Global Technology hw_avail = Mar-2021 sw_avail = Mar-2021 prepared_by = Lenovo Global Technology system_vendor = Lenovo Global Technology node_fileserver_sw_state = Multi-User, run level 3 node_fileserver_sw_sharedfile = NFS node_fileserver_sw_other = None node_fileserver_sw_os = Red Hat Enterprise Linux Server release 8.3 node_fileserver_sw_localfile = None node_fileserver_purpose = Fileserver node_fileserver_order = 2 node_fileserver_label = NFS node_fileserver_hw_vendor = Lenovo Global Technology node_fileserver_hw_scache = 512 KB I+D on chip per core node_fileserver_hw_pcache = 32 KB I + 32 KB D on chip per core node_fileserver_hw_other = None node_fileserver_hw_ocache = None node_fileserver_hw_nthreadspercore = 1 node_fileserver_hw_ncpuorder = 1-2 chips node_fileserver_hw_ncoresperchip = 64 node_fileserver_hw_ncores = 128 node_fileserver_hw_nchips = 2 node_fileserver_hw_model = ThinkSystem SR665 node_fileserver_hw_memory = 1 TB (16 x 64 GB 2Rx4 PC4-3200AA-R) node_fileserver_hw_disk = 1 x 480 GB SATA 2.5" SSD node_fileserver_hw_cpu_name = AMD EPYC 7763 CPU node_fileserver_hw_cpu_mhz = 2450 node_fileserver_hw_cpu_char = None 
node_fileserver_hw_adapter_fs_slot_type = PCI-Express 4.0 x16 node_fileserver_hw_adapter_fs_ports_used = 1 node_fileserver_hw_adapter_fs_model = Mellanox ConnectX-6 HDR Infiniband node_fileserver_hw_adapter_fs_interconnect = Mellanox ConnectX-6 HDR Infiniband node_fileserver_hw_adapter_fs_firmware = 20.25.2006 node_fileserver_hw_adapter_fs_driver = 5.2-1.0.4 node_fileserver_hw_adapter_fs_data_rate = 200 Gb/s node_fileserver_hw_adapter_fs_count = 1 node_fileserver_count = 1 node_compute_sw_state = Multi-user, run level 3 node_compute_sw_sharedfile = None node_compute_sw_other = None node_compute_sw_localfile = xfs node_compute_purpose = compute node_compute_order = 1 node_compute_label = ThinkSystem SR665 node_compute_hw_vendor = Lenovo Global Technology node_compute_hw_scache = 512 KB I+D on chip per core node_compute_hw_pcache = 32 KB I + 32 KB D on chip per core node_compute_hw_other = None node_compute_hw_ocache = None node_compute_hw_nthreadspercore = 1 node_compute_hw_ncpuorder = 1-2 chips node_compute_hw_ncoresperchip = 64 node_compute_hw_ncores = 128 node_compute_hw_nchips = 2 node_compute_hw_model = SR665 node_compute_hw_memory = 1 TB (16 x 64 GB 2Rx4 PC4-3200AA-R) node_compute_hw_disk = 1 x 480 GB SATA 2.5" SSD node_compute_hw_cpu_name = AMD EPYC 7763 node_compute_hw_cpu_mhz = 2450 node_compute_hw_cpu_char = None node_compute_hw_adapter_fs_slot_type = PCI-Express 4.0 x16 node_compute_hw_adapter_fs_ports_used = 1 node_compute_hw_adapter_fs_model = Mellanox ConnectX-6 HDR Infiniband node_compute_hw_adapter_fs_interconnect = Mellanox ConnectX-6 HDR Infiniband Adapter node_compute_hw_adapter_fs_firmware = 20.25.2006 node_compute_hw_adapter_fs_driver = 5.2-1.0.4 node_compute_hw_adapter_fs_data_rate = 200 Gb/s node_compute_hw_adapter_fs_count = 1 node_compute_count = 4 interconnect_fs_purpose = MPI Traffic interconnect_fs_order = 0 interconnect_fs_label = Mellanox ConnectX-6 HDR interconnect_fs_hw_vendor = Mellanox interconnect_fs_hw_topo = Mesh 
interconnect_fs_hw_switch_fs_ports = 40 interconnect_fs_hw_switch_fs_model = QM8700 Series interconnect_fs_hw_switch_fs_firmware = 3.9.0606 interconnect_fs_hw_switch_fs_data_rate = 200 Gb/s interconnect_fs_hw_switch_fs_count = 1 interconnect_fs_hw_model = Infiniband HDR 200Gb/s Switch node_compute_hw_tcache000 = 256 MB I+D on chip per chip node_compute_hw_tcache001 = 32 MB shared / 8 cores node_compute_sw_os000 = Red Hat Enterprise Linux Server release 8.3 node_compute_sw_os001 = 4.18.0-240.el8.x86_64 node_fileserver_hw_tcache001 = 256 MB I+D on chip per chip node_fileserver_hw_tcache002 = 32 MB shared / 8 cores sw_c_compiler000 = AMD Optimizing C Compiler for Linux sw_c_compiler001 = Version 2.3.0 Build 2020_11_10 sw_cxx_compiler000 = AMD Optimizing C++ Compiler for Linux sw_cxx_compiler001 = Version 2.3.0 Build 2020_11_10 sw_f_compiler000 = AMD Optimizing Fortran Compiler for Linux sw_f_compiler001 = Version 2.3.0 Build 2020_11_10 sw_mpi_library000 = Open MPI Library sw_mpi_library001 = Version 4.1.0 system_class = Homogeneous system_name000 = ThinkSystem SR665 system_name001 = (AMD EPYC 7763, 2.45 GHz) notes_000 = MPI startup command: notes_005 = mpirun command was used to start MPI jobs. notes_010 = RAM configuration: notes_015 = Compute nodes have 1 x 64 GB RDIMM on each memory channel. notes_020 = Add "idle=poll" into grub notes_025 = BIOS settings: notes_030 = Operating Mode : Maximum Performance Mode notes_035 = Hyper-Threading Technology (SMT): Enabled notes_040 = NPS4 notes_045 = Yes: The test sponsor attests, as of date of publication, notes_050 = that CVE-2017-5754 (Meltdown) is mitigated in the system as tested and documented. notes_055 = Yes: The test sponsor attests, as of date of publication, notes_060 = that CVE-2017-5753 (Spectre variant 1) is mitigated in the system as tested and documented. 
notes_065 = Yes: The test sponsor attests, as of date of publication, notes_070 = that CVE-2017-5715 (Spectre variant 2) is mitigated in the system as tested and documented.