# Invocation command line:
# /home/specmpi2007/bin/runspec --config=MPIM2007.014.mref.cfg --size=mref --tune=base --reportable --rank=192 --iterations=3 medium
# output_root was not used for this run
############################################################################
# Invocation command line:
# /home/specmpi2007/bin/runspec --config=aocc --size=mref --tune=base --noreportable --rank=192 --iterations=3 104
# output_root was not used for this run
############################################################################
############################################################################

# Only modify the binary label extension if you plan to rebuild the binaries.

# The following setting was inserted automatically as a result of
# post-run basepeak application.
basepeak = 1

%define ext amd-aocc40-mpi2007
%define build_ncpus 16      # controls number of simultaneous compiles

#preenv = 1
#makeflags = --jobs=%{build_ncpus} --load-average=%{build_ncpus}
#preENV_OMP_SCHEDULE = static
#preENV_OMP_DYNAMIC = false
#preENV_OMP_THREAD_LIMIT = 128
#preENV_GOMP_CPU_AFFINITY = 0-128
#preENV_OMP_STACKSIZE = 128M
#preENV_KMP_LIBRARY = turnaround
#preENV_KMP_BLOCKTIME = 200
#preENV_OMP_NESTED = FALSE
#preENV_OMP_PLACES = threads

################################################################################
# Header settings
################################################################################
default:
CC  = mpicc
CXX = mpic++
FC  = mpif90
CLD   = mpicc
CXXLD = mpic++
FLD   = mpif90
CC_VERSION_OPTION  = --version
CXX_VERSION_OPTION = --version
FC_VERSION_OPTION  = --version

ranks  = 192
submit = mpirun --allow-run-as-root --oversubscribe -host 192.168.99.15 -mca coll_hcoll_enable 1 -x HCOLL_MAIN_IB=mlx5_0:1 -mca pml ucx -x UCX_TLS=sm -np $ranks $command

default:   # data model applies to all benchmarks
MATHLIBOPT = -lamdlibm -lm

################################################################################
# Tuning Flags
################################################################################
121.pop2=default=default=default:
CPORTABILITY = -DSPEC_MPI_CASE_FLAG

126.lammps=default=default=default:
CXXPORTABILITY = -DMPICH_IGNORE_CXX_SEEK

127.wrf2=default=default=default:
CPORTABILITY = -DSPEC_MPI_CASE_FLAG -DSPEC_MPI_LINUX -Wno-return-type
#FPORTABILITY = -Mbyteswapio

129.tera_tf=default=default=default:
srcalt = add_rank_support

#################################################################
# Optimization flags
#################################################################
default=default=default=default:
EXTRA_CPORTABILITY = -DSPEC_MPI_LP64
COPTIMIZE   = -Ofast -flto -ffast-math -march=znver4
CXXOPTIMIZE = -Ofast -flto -ffast-math -march=znver4
FOPTIMIZE   = -Ofast -flto -ffast-math -march=znver4 -funroll-loops
#EXTRA_LIBS  = -lamdlibm -lm -ljemalloc -lflang -flto
#EXTRA_FLIBS = -lamdlibm -lm -flto
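
# For illustration: at run time the SPEC tools substitute $ranks and $command
# into the submit template defined above, so each benchmark is launched
# roughly as shown below. The run directory and binary name here are
# hypothetical examples, not taken from this run.
#
#   mpirun --allow-run-as-root --oversubscribe -host 192.168.99.15 \
#          -mca coll_hcoll_enable 1 -x HCOLL_MAIN_IB=mlx5_0:1 \
#          -mca pml ucx -x UCX_TLS=sm -np 192 \
#          ../run_base_mref_amd-aocc40-mpi2007.0000/wrf2_base.amd-aocc40-mpi2007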

# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.

default:
sw_base_ptrsize = 64-bit
sw_peak_ptrsize = 64-bit
sw_other = None
sw_mpi_other = None
sw_preprocessors = No
test_sponsor = Lenovo
license_num = 28
tester = Lenovo
hw_avail = Feb-2023
sw_avail = Feb-2023
prepared_by = Lenovo
system_vendor = Lenovo
system_name = ThinkSystem SR665 V3 (AMD EPYC 9654)
node_compute_sw_state = Multi-user, run level 3
node_compute_sw_sharedfile = None
node_compute_sw_other = None
node_compute_sw_localfile = ext4
node_compute_purpose = compute
node_compute_order = 1
node_compute_label = ThinkSystem SR665 V3
node_compute_hw_vendor = Lenovo
node_compute_hw_scache = 1 MB I+D on chip per core
node_compute_hw_pcache = 32 KB I + 32 KB D on chip per core
node_compute_hw_other = None
node_compute_hw_ocache = None
node_compute_hw_nthreadspercore = 2
node_compute_hw_ncpuorder = 1, 2 chips
node_compute_hw_ncoresperchip = 96
node_compute_hw_ncores = 192
node_compute_hw_nchips = 2
node_compute_hw_model = ThinkSystem SR665 V3
node_compute_hw_memory = 768 GB (24 x 32 GB 2Rx8 PC5-4800B-R)
node_compute_hw_disk = 1x ThinkSystem 2.5" 5300 480GB SSD
node_compute_hw_cpu_name = AMD EPYC 9654
node_compute_hw_cpu_mhz = 2400
node_compute_hw_cpu_char = Max. Boost Clock up to 3.7 GHz
node_compute_hw_adapter_MISSING_slot_type = PCIe Gen5 x16
node_compute_hw_adapter_MISSING_ports_used = 1
node_compute_hw_adapter_MISSING_model = Mellanox ConnectX-6 HDR
node_compute_hw_adapter_MISSING_firmware = 20.28.1002
node_compute_hw_adapter_MISSING_driver = Mellanox
node_compute_hw_adapter_MISSING_data_rate = 200 Gb/s
node_compute_hw_adapter_MISSING_count = 1
node_compute_count = 1
notes_000 = MPI startup command:
notes_005 = mpiexec command was used to start MPI jobs.
notes_010 = Yes: The test sponsor attests, as of date of publication,
notes_015 = that CVE-2017-5754 (Meltdown) is mitigated in the system as tested and documented.
notes_020 = Yes: The test sponsor attests, as of date of publication,
notes_025 = that CVE-2017-5753 (Spectre variant 1) is mitigated in the system as tested and documented.
notes_030 = Yes: The test sponsor attests, as of date of publication,
notes_035 = that CVE-2017-5715 (Spectre variant 2) is mitigated in the system as tested and documented.
flagsurl000 = http://www.spec.org/mpi2007/flags/amd2021_flags.xml
node_compute_hw_adapter_MISSING_interconnect = Mellanox ConnectX-6 HDR
node_compute_hw_tcache000 = 384 MB I+D on chip per chip
node_compute_hw_tcache001 = 32 MB shared / 8 cores
node_compute_sw_os000 = Red Hat Enterprise Linux Server release 8.6,
node_compute_sw_os001 = Kernel 4.18.0-372.9.1.el8.x86_64
sw_c_compiler000 = AMD Optimizing C/C++ and Fortran Compilers (AOCC)
sw_c_compiler001 = Version 4.0.0 Build 389 for Linux
sw_cxx_compiler000 = AMD Optimizing C/C++ and Fortran Compilers (AOCC)
sw_cxx_compiler001 = Version 4.0.0 Build 389 for Linux
sw_f_compiler000 = AMD Optimizing C/C++ and Fortran Compilers (AOCC)
sw_f_compiler001 = Version 4.0.0 Build 389 for Linux
sw_mpi_library000 = Open MPI Library for Linux
sw_mpi_library001 = Version 4.1.1
system_class = Homogeneous