# Invocation command line: # /root/cpu2017-1.1.0/bin/harness/runcpu --configfile amd_speed_aocc200_rome_C1.cfg --tune all --reportable --iterations 3 --nopower --runmode speed --tune base:peak --size test:train:refspeed fpspeed # output_root was not used for this run ############################################################################ ################################################################################ # AMD AOCC 2.0.0 SPEC CPU2017 V1.1.0 Speed Configuration File for 64-bit Linux # # File name : amd_speed_aocc200_rome_C1.cfg # Creation Date : August 22, 2019 # CPU2017 Version : 1.1.0 # Supported benchmarks : All Speed benchmarks (intspeed, fpspeed) # Compiler name/version : AOCC v2.0.0 # Operating system version : Fedora 26 # Supported OS's : Ubuntu 18.04/19.04, RHEL 8.0, SLES 15 SP1 # Hardware : AMD Rome, Naples (AMD64) # FP Base Pointer Size : 64-bit # FP Peak Pointer Size : 64-bit # INT Base Pointer Size : 64-bit # INT Peak Pointer Size : 32/64-bit # Auto Parallelization : No # # Note: DO NOT EDIT THIS FILE, the only edits required to properly run these # binaries are made in the ini Python file. Please consult Readme.amd_speed_aocc200_rome_C1.txt # for a few uncommon exceptions which require edits to this file. # # Description: # # This binary package automates away many of the complexities necessary to set # up and run SPEC CPU2017 under optimized conditions on AMD Rome/Naples-based # server platforms within Linux (AMD64). # # The binary package was built specifically for AMD Rome/Naples microprocessors and # is not intended to run on other products. # # Please install the binary package by following the instructions in # "Readme.amd_speed_aocc200_rome_C1.txt" under the "How To Use the Binaries" section. # # The binary package is designed to work without alteration on two socket AMD # Rome/Naples-based servers with 64 cores per socket, SMT enabled and 1 TiB of DDR4 # memory distributed evenly among all 16 channels using 32 GiB DIMMs. 
# # To run the binary package on other Rome/Naples configurations, please review # "Readme.amd_speed_aocc200_rome_C1.txt". In general, Rome or Naples CPUs # should be autodetected with no action required by the user. # # In most cases, it should be unnecessary to edit "amd_speed_aocc200_rome_C1.cfg" or any # other file besides "ini_amd_speed_aocc200_rome_C1.py" where reporting fields # and run conditions are set. # # The run script automatically sets the optimal number of speed copies and binds # them appropriately. # # The run script and accompanying binary package are designed to work on Ubuntu # 18.04/19.04, RHEL 8.0 and SLES 15 SP1. # # Important! If you write your own run script, please set the stack size to # "unlimited" when executing this binary package. Failure to do so may cause # some benchmarks to overflow the stack. For example, to set stack size within # the bash shell, include the following line somewhere at the top of your run # script before the runcpu invocation: # # ulimit -s unlimited # # Modification of this config file should only be necessary if you intend to # rebuild the binaries. General instructions for rebuilding the binaries are # found in-line below. # ################################################################################ # Include file name ################################################################################ # The include file contains fields that are commonly changed. This file is auto- # generated based upon INI file settings and should not need user modification # for runs. %define inc_file_name amd_speed_aocc200_rome_C1.inc ################################################################################ # Binary label extension and "allow_build" switch ################################################################################ # Only modify the binary label extension if you plan to rebuild the binaries. 
%define ext amd_speed_aocc200_rome_C # If you plan to recompile these CPU2017 binaries, please choose a new extension # name (ext above) to avoid confusion with the current binary set on your system # under test, and to avoid confusion for SPEC submission reviewers. You will # also need to set "allow_build" to true below. Finally, you must modify the # Paths section below to point to your library locations if the paths are not # already set up in your build environment. # Change the following line to true if you intend to REBUILD the binaries (AMD # does not support this). Valid values are "true" or "false" (no quotes). %define allow_build false ################################################################################ # Paths and Environment Variables # ** MODIFY AS NEEDED (modification should not be necessary for runs) ** ################################################################################ # Allow environment variables to be set before runs: preenv = 1 # Necessary to avoid out-of-memory exceptions on certain SUTs: preENV_MALLOC_CONF = retain:true # Define the name of the directory that holds AMD library files: %define lib_dir amd_speed_aocc200_rome_C_lib # Set the shared object library path for runs and builds: preENV_LD_LIBRARY_PATH = $[top]/%{lib_dir}/64;$[top]/%{lib_dir}/32:%{ENV_LD_LIBRARY_PATH} # Define 32-bit library build paths: # Do not use $[top] with the 32-bit libraries because doing so will cause an # options checksum error triggering a xalanc recompile attempt on SUTs having # different file paths: JEMALLOC_LIB32_PATH = /sppo/dev/cpu2017/v110/%{lib_dir}/32 OMP_LIB32_PATH = /sppo/dev/cpu2017/v110/%{lib_dir}/32 %if '%{allow_build}' eq 'false' # The include file is only needed for runs, but not for builds. 
# include: %{inc_file_name} # ----- Begin inclusion of 'amd_speed_aocc200_rome_C1.inc' ############################################################################ ################################################################################ ################################################################################ # File name: amd_speed_aocc200_rome_C1.inc # File generation code date: August 12, 2019 # File generation date/time: November 30, 2019 / 06:48:24 # # This file is automatically generated during a SPEC CPU2017 run. # # To modify inc file generation, please consult the readme file or the run # script. ################################################################################ ################################################################################ ################################################################################ ################################################################################ # The following macros are generated for use in the cfg file. ################################################################################ ################################################################################ %define logical_core_count 256 %define physical_core_count 128 ################################################################################ # The following macros define the Speed thread counts for the peak benchmarks. 
# # intspeed benchmarks: 600.perlbench_s,602.gcc_s,605.mcf_s,620.omnetpp_s, # 623.xalancbmk_s,625.x264_s,631.deepsjeng_s,641.leela_s,648.exchange2_s, # 657.xz_s # fpspeed benchmarks: 603.bwaves_s,607.cactuBSSN_s,619.lbm_s,621.wrf_s, # 627.cam4_s,628.pop2_s,638.imagick_s,644.nab_s,649.fotonik3d_s, # 654.roms_s # ################################################################################ # default preENV thread settings: default: preENV_OMP_THREAD_LIMIT = 256 preENV_GOMP_CPU_AFFINITY = 0-255 ################################################################################ ################################################################################ # intspeed base thread counts: intspeed=base: threads = 128 ENV_GOMP_CPU_AFFINITY = 0-127 bind0 = numactl --physcpubind=0-127 submit = echo "$command" > run.sh ; $BIND bash run.sh ################################################################################ ################################################################################ # fpspeed base thread counts: fpspeed=base: threads = 128 ENV_GOMP_CPU_AFFINITY = 0-127 bind0 = numactl --physcpubind=0-127 submit = echo "$command" > run.sh ; $BIND bash run.sh ################################################################################ ################################################################################ # peak thread counts: 1 600.perlbench_s,602.gcc_s,605.mcf_s,620.omnetpp_s,623.xalancbmk_s,625.x264_s,631.deepsjeng_s,641.leela_s,648.exchange2_s=peak: threads = 1 ENV_GOMP_CPU_AFFINITY = 0 bind0 = numactl --physcpubind=0 submit = echo "$command" > run.sh ; $BIND bash run.sh ################################################################################ ################################################################################ # peak thread counts: 128 603.bwaves_s,607.cactuBSSN_s,621.wrf_s,627.cam4_s,628.pop2_s,638.imagick_s,649.fotonik3d_s,654.roms_s,657.xz_s=peak: threads = 128 ENV_GOMP_CPU_AFFINITY = 0-127 bind0 = numactl 
--physcpubind=0-127 submit = echo "$command" > run.sh ; $BIND bash run.sh ################################################################################ ################################################################################ # peak thread counts: 256 619.lbm_s,644.nab_s=peak: threads = 256 ENV_GOMP_CPU_AFFINITY = 0 128 1 129 2 130 3 131 4 132 5 133 6 134 7 135 8 136 9 137 10 138 11 139 12 140 13 141 14 142 15 143 16 144 17 145 18 146 19 147 20 148 21 149 22 150 23 151 24 152 25 153 26 154 27 155 28 156 29 157 30 158 31 159 32 160 33 161 34 162 35 163 36 164 37 165 38 166 39 167 40 168 41 169 42 170 43 171 44 172 45 173 46 174 47 175 48 176 49 177 50 178 51 179 52 180 53 181 54 182 55 183 56 184 57 185 58 186 59 187 60 188 61 189 62 190 63 191 64 192 65 193 66 194 67 195 68 196 69 197 70 198 71 199 72 200 73 201 74 202 75 203 76 204 77 205 78 206 79 207 80 208 81 209 82 210 83 211 84 212 85 213 86 214 87 215 88 216 89 217 90 218 91 219 92 220 93 221 94 222 95 223 96 224 97 225 98 226 99 227 100 228 101 229 102 230 103 231 104 232 105 233 106 234 107 235 108 236 109 237 110 238 111 239 112 240 113 241 114 242 115 243 116 244 117 245 118 246 119 247 120 248 121 249 122 250 123 251 124 252 125 253 126 254 127 255 bind0 = numactl --physcpubind=0-255 submit = echo "$command" > run.sh ; $BIND bash run.sh ################################################################################ ################################################################################ ################################################################################ # Switch back to default: default: ################################################################################ ################################################################################ ################################################################################ # The remainder of this file defines CPU2017 report parameters. 
################################################################################ ################################################################################ ################################################################################ # SPEC CPU 2017 report header ################################################################################ license_num =55 # (Your SPEC license number) tester =Dell Inc. test_sponsor =Dell Inc. hw_vendor =Dell Inc. hw_model000 =PowerEdge R7525 (AMD EPYC 7662, 2.00 GHz) #--------- If you install new compilers, edit this section -------------------- sw_compiler =C/C++/Fortran: Version 2.0.0 of AOCC ################################################################################ ################################################################################ # Hardware, firmware and software information ################################################################################ hw_avail =Feb-2020 sw_avail =Aug-2019 hw_cpu_name =AMD EPYC ROME64 hw_cpu_nominal_mhz =2000 hw_cpu_max_mhz =3300 hw_ncores =128 hw_nthreadspercore =2 hw_ncpuorder =1,2 chips hw_other =None # Other perf-relevant hw, or "None" fw_bios =Version 1.2.6 released Nov-2019 sw_base_ptrsize =64-bit hw_pcache =32 KB I + 32 KB D on chip per core hw_scache =512 KB I+D on chip per core hw_tcache000 =256 MB I+D on chip per chip, 16 MB shared / 4 hw_tcache001 = cores hw_ocache =None ################################################################################ # Notes ################################################################################ # Enter notes_000 through notes_100 here. notes_000 =Binaries were compiled on a system with 2x AMD EPYC 7601 CPU + 512GB Memory using Fedora 26 notes_005 = notes_010 =NA: The test sponsor attests, as of date of publication, that CVE-2017-5754 (Meltdown) notes_015 =is mitigated in the system as tested and documented. 
notes_020 =Yes: The test sponsor attests, as of date of publication, that CVE-2017-5753 (Spectre variant 1) notes_025 =is mitigated in the system as tested and documented. notes_030 =Yes: The test sponsor attests, as of date of publication, that CVE-2017-5715 (Spectre variant 2) notes_035 =is mitigated in the system as tested and documented. notes_040 = notes_submit_000 ='numactl' was used to bind copies to the cores. notes_submit_005 =See the configuration file for details. notes_os_000 ='ulimit -s unlimited' was used to set environment stack size notes_os_005 ='ulimit -l 2097152' was used to set environment locked pages in memory limit notes_os_010 = notes_os_015 =runcpu command invoked through numactl i.e.: notes_os_020 =numactl --interleave=all runcpu notes_os_025 = notes_os_030 =Set dirty_ratio=8 to limit dirty cache to 8% of memory notes_os_035 =Set swappiness=1 to swap only if necessary notes_os_040 =Set zone_reclaim_mode=1 to free local node memory and avoid remote memory notes_os_045 =sync then drop_caches=3 to reset caches before invoking runcpu notes_os_050 = notes_os_055 =dirty_ratio, swappiness, zone_reclaim_mode and drop_caches were notes_os_060 =all set using privileged echo (e.g. echo 1 > /proc/sys/vm/swappiness). notes_os_065 = notes_os_070 =Transparent huge pages set to 'always' for this run (OS default) notes_comp_000 =The AMD64 AOCC Compiler Suite is available at notes_comp_005 =http://developer.amd.com/amd-aocc/ notes_comp_010 = notes_jemalloc_000 =jemalloc: configured and built with GCC v9.1.0 in Ubuntu 19.04 with -O3 -znver2 -flto notes_jemalloc_005 =jemalloc 5.1.0 is available here: notes_jemalloc_010 =https://github.com/jemalloc/jemalloc/releases/download/5.1.0/jemalloc-5.1.0.tar.bz2 notes_jemalloc_015 = sw_other =jemalloc: jemalloc memory allocator library v5.1.0 power_management = BIOS set to prefer performance at the cost of additional power usage. 
################################################################################ # The following note fields describe platform settings. ################################################################################ # example: (uncomment as necessary) notes_plat_000 = BIOS settings: notes_plat_005 = NUMA Nodes Per Socket set to 4 notes_plat_010 = CCX as NUMA Domain set to Enabled notes_plat_015 = System Profile set to Custom notes_plat_020 = CPU Power Management set to Maximum Performance notes_plat_025 = Memory Frequency set to Maximum Performance notes_plat_030 = Turbo Boost Enabled notes_plat_035 = Cstates set to Enabled notes_plat_040 = Memory Patrol Scrub Disabled notes_plat_045 = Memory Refresh Rate set to 1x notes_plat_050 = PCI ASPM L1 Link Power Management Disabled notes_plat_055 = Determinism Slider set to Power Determinism notes_plat_060 = Efficiency Optimized Mode Disabled notes_plat_065 = Memory Interleaving set to Disabled ################################################################################ # The following are custom fields: ################################################################################ # Use custom_fields to enter lines that are not listed here. For example: # notes_plat_100 = Energy Bias set to Max Performance # new_field = Ambient temperature set to 10C ################################################################################ # The following fields must be set here only for Int benchmarks. ################################################################################ intspeed: sw_peak_ptrsize =32/64-bit ################################################################################ # The following fields must be set here for FP benchmarks. 
################################################################################ fpspeed: sw_peak_ptrsize =64-bit ################################################################################ # The following fields must be set here or they will be overwritten by sysinfo. ################################################################################ intspeed,fpspeed: hw_disk =2 x 960 GB SAS SSD hw_memory000 =512 GB (16 x 32 GB 2Rx4 PC4-3200AA-R) hw_memory002 = hw_nchips =2 prepared_by =Dell Inc. sw_file =xfs sw_os000 =SUSE Linux Enterprise Server 15 SP1 sw_os001 =kernel 4.12.14-195-default sw_state =Run level 3 (multi-user) ################################################################################ # End of inc file ################################################################################ # Switch back to the default block after the include file: default: # ---- End inclusion of '/root/cpu2017-1.1.0/config/amd_speed_aocc200_rome_C1.inc' # Switch back to default block after the include file: default: fail_build = 1 # OpenMP environment variables: preENV_OMP_STACKSIZE = 128M preENV_OMP_SCHEDULE = static preENV_OMP_DYNAMIC = false %elif '%{allow_build}' eq 'true' # If you intend to rebuild, be sure to set the library paths either in the # build script or here: preENV_LIBRARY_PATH = $[top]/%{lib_dir}/64;$[top]/%{lib_dir}/32:%{ENV_LIBRARY_PATH} % define build_ncpus 64 # controls the number of simultaneous compile jobs fail_build = 0 makeflags = --jobs=%{build_ncpus} --load-average=%{build_ncpus} %else % error The value of "allow_build" is %{allow_build}, but it can only be "true" or "false". This error was generated %endif ################################################################################ # Enable automated data collection per benchmark ################################################################################ # Data collection is not enabled for reportable runs. # teeout is necessary to get data collection stdout into the logs. 
Best # practices for the individual data collection items would be to have # them store important output in separate files. Filenames could be # constructed from $SPEC (environment), $lognum (result number from runcpu), # and benchmark name/number. teeout = yes # Run runcpu with '-v 35' (or greater) to log lists of variables which can # be used in substitutions as below. # For CPU2006, change $label to $ext %define data-collection-parameters benchname='$name' benchnum='$num' benchmark='$benchmark' iteration=$iter size='$size' tune='$tune' label='$label' log='$log' lognum='$lognum' from_runcpu='$from_runcpu' %define data-collection-start $[top]/data-collection/data-collection start %{data-collection-parameters} %define data-collection-stop $[top]/data-collection/data-collection stop %{data-collection-parameters} monitor_specrun_wrapper = %{data-collection-start} ; $command ; %{data-collection-stop} ################################################################################ # Header settings ################################################################################ backup_config = 0 # set to 0 if you do not want backup files bench_post_setup = sync # command_add_redirect: If set, the generated ${command} will include # redirection operators (stdout, stderr), which are passed along to the shell # that executes the command. If this variable is not set, specinvoke does the # redirection. 
command_add_redirect = yes env_vars = yes flagsurl000 = http://www.spec.org/cpu2017/flags/aocc200-flags-B1-speed-Dell.xml flagsurl001 = http://www.spec.org/cpu2017/flags/Dell-Platform-Flags-PowerEdge-revE7.xml #flagsurl02 = $[top]/INVALID_platform_amd_speed_aocc200_rome.xml # label: User defined extension string that tags your binaries & directories: label = %{ext} line_width = 1020 log_line_width = 1020 mean_anyway = yes output_format = all reportable = yes size = test,train,ref teeout = yes teerunout = yes tune = base,peak use_submit_for_speed = yes ################################################################################ # Compilers ################################################################################ default: CC = clang CXX = clang++ FC = flang CLD = clang FLD = flang CC_VERSION_OPTION = --version CXX_VERSION_OPTION = --version FC_VERSION_OPTION = --version default:# data model applies to all benchmarks ################################################################################ # Default Flags ################################################################################ EXTRA_PORTABILITY = -DSPEC_LP64 EXTRA_LIBS = -fopenmp=libomp -lomp -ljemalloc -lamdlibm -lm MATHLIBOPT = ################################################################################ # Portability Flags ################################################################################ default: # *** Benchmark-specific portability *** # Anything other than the data model is only allowed where a need is proven. 
# (ordered by last 2 digits of benchmark number) 600.perlbench_s: #lang='C' PORTABILITY = -DSPEC_LINUX_X64 621.wrf_s: #lang='F,C' CPORTABILITY = -DSPEC_CASE_FLAG FPORTABILITY = -Mbyteswapio 623.xalancbmk_s: #lang='CXX' PORTABILITY = -DSPEC_LINUX 627.cam4_s: #lang='F,C' PORTABILITY = -DSPEC_CASE_FLAG 628.pop2_s: #lang='F,C' CPORTABILITY = -DSPEC_CASE_FLAG FPORTABILITY = -Mbyteswapio ################################################################################ # Tuning Flags ################################################################################ ##################### # Base tuning flags # ##################### default=base: #optimize flags COPTIMIZE = -O3 -flto -ffast-math -march=znver2 -fstruct-layout=3 \ -mllvm -unroll-threshold=50 -fremap-arrays \ -mllvm -function-specialize -mllvm -enable-gvn-hoist \ -mllvm -reduce-array-computations=3 -mllvm -global-vectorize-slp \ -mllvm -vector-library=LIBMVEC \ -mllvm -inline-threshold=1000 -flv-function-specialization CXXOPTIMIZE = -O3 -flto -ffast-math -march=znver2 \ -mllvm -loop-unswitch-threshold=200000 \ -mllvm -vector-library=LIBMVEC \ -mllvm -unroll-threshold=100 -flv-function-specialization \ -mllvm -enable-partial-unswitch FOPTIMIZE = -O3 -flto -march=znver2 -funroll-loops -Mrecursive \ -mllvm -vector-library=LIBMVEC EXTRA_FFLAGS = -Kieee -fno-finite-math-only #linker flags LDFLAGS = -flto -Wl,-mllvm -Wl,-function-specialize \ -Wl,-mllvm -Wl,-region-vectorize \ -Wl,-mllvm -Wl,-vector-library=LIBMVEC \ -Wl,-mllvm -Wl,-reduce-array-computations=3 LDCXXFLAGS = -Wl,-mllvm -Wl,-suppress-fmas #other libraries # Put OpenMP and math libraries here: EXTRA_LIBS = -fopenmp=libomp -lomp -lpthread -ldl -lmvec -lamdlibm -ljemalloc -lflang -lm # Don't put the AMD and mvec math libraries in MATHLIBOPT because it will trigger a reporting issue # because GCC won't use them. Forcefeed all benchmarks the math libraries in EXTRA_LIBS and clear # out MATHLIBOPT. 
MATHLIBOPT = # The following is necessary for 502/602 gcc: EXTRA_OPTIMIZE = -DSPEC_OPENMP -fopenmp -Wno-return-type -DUSE_OPENMP # The following is necessary for 502/602 gcc: LDOPTIMIZE = -z muldefs ######################## # intspeed tuning flags # ######################## intspeed: EXTRA_FFLAGS = -ffast-math \ -mllvm -disable-indvar-simplify \ -mllvm -unroll-aggressive \ -mllvm -unroll-threshold=150 LDFFLAGS = -ffast-math \ -Wl,-mllvm -Wl,-inline-recursion=4 \ -Wl,-mllvm -Wl,-lsr-in-nested-loop \ -Wl,-mllvm -Wl,-enable-iv-split ######################## # fpspeed tuning flags # ######################## fpspeed: CXX = clang++ -std=c++98 ##################### # Peak tuning flags # ##################### default=peak: #optimize flags COPTIMIZE = -Ofast -flto -march=znver2 -mno-sse4a -fstruct-layout=5 \ -mllvm -vectorize-memory-aggressively \ -mllvm -function-specialize -mllvm -enable-gvn-hoist \ -mllvm -unroll-threshold=50 -fremap-arrays \ -mllvm -vector-library=LIBMVEC \ -mllvm -reduce-array-computations=3 -mllvm -global-vectorize-slp \ -mllvm -inline-threshold=1000 -flv-function-specialization CXXOPTIMIZE = -Ofast -flto -march=znver2 -flv-function-specialization \ -mllvm -unroll-threshold=100 -mllvm -enable-partial-unswitch \ -mllvm -loop-unswitch-threshold=200000 \ -mllvm -vector-library=LIBMVEC \ -mllvm -inline-threshold=1000 FOPTIMIZE = -O3 -flto -march=znver2 -funroll-loops -Mrecursive \ -mllvm -vector-library=LIBMVEC EXTRA_FFLAGS = -Kieee -fno-finite-math-only #linker flags LDFLAGS = -flto -Wl,-mllvm -Wl,-function-specialize \ -Wl,-mllvm -Wl,-region-vectorize \ -Wl,-mllvm -Wl,-vector-library=LIBMVEC \ -Wl,-mllvm -Wl,-reduce-array-computations=3 #libraries EXTRA_LIBS = -fopenmp=libomp -lomp -lpthread -ldl -lmvec -lamdlibm -ljemalloc -lflang -lm EXTRA_OPTIMIZE = -DSPEC_OPENMP -fopenmp -Wno-return-type -DUSE_OPENMP EXTRA_FLIBS = -lmvec -lamdlibm -lm MATHLIBOPT = -lmvec -lamdlibm -lm feedback = 0 PASS1_CFLAGS = -fprofile-instr-generate PASS2_CFLAGS = 
-fprofile-instr-use PASS1_FFLAGS = -fprofile-generate PASS2_FFLAGS = -fprofile-use PASS1_CXXFLAGS = -fprofile-instr-generate PASS2_CXXFLAGS = -fprofile-instr-use PASS1_LDFLAGS = -fprofile-instr-generate PASS2_LDFLAGS = -fprofile-instr-use fdo_run1 = $command ; llvm-profdata merge -output=default.profdata *.profraw ######################################## # Benchmark specific peak tuning flags # ######################################## 600.perlbench_s=peak: #lang='C' feedback = 1 602.gcc_s=peak: #lang='C' EXTRA_COPTIMIZE = -fgnu89-inline LDOPTIMIZE = -z muldefs EXTRA_LIBS = -fopenmp=libomp -lomp -lpthread -ldl -lm -ljemalloc MATHLIBOPT = -lm 623.xalancbmk_s=peak: #lang='CXX' EXTRA_PORTABILITY = -D_FILE_OFFSET_BITS=64 CXX = clang++ -m32 CXXLD = clang++ -m32 EXTRA_LIBS = -L$[OMP_LIB32_PATH] -fopenmp=libomp -L$[OMP_LIB32_PATH] -lomp -lpthread -ldl -L$[JEMALLOC_LIB32_PATH] -ljemalloc MATHLIBOPT = -lm ENV_OMP_STACKSIZE = 128M 625.x264_s=peak: #lang='C' feedback = 1 654.roms_s=peak: LDFFLAGS = -Wl,-mllvm -Wl,-enable-X86-prefetching # The following settings were obtained by running the sysinfo_program # 'specperl $[top]/bin/sysinfo' (sysinfo:SHA:1b187da62efa5d65f0e989c214b6a257d16a31d3cf135973c9043da741052207) default: notes_plat_sysinfo_000 = notes_plat_sysinfo_005 = Sysinfo program /root/cpu2017-1.1.0/bin/sysinfo notes_plat_sysinfo_010 = Rev: r6365 of 2019-08-21 295195f888a3d7edb1e6e46a485a0011 notes_plat_sysinfo_015 = running on linux-g3ob Sat Nov 30 12:04:50 2019 notes_plat_sysinfo_020 = notes_plat_sysinfo_025 = SUT (System Under Test) info as seen by some common utilities. 
notes_plat_sysinfo_030 = For more information on this section, see notes_plat_sysinfo_035 = https://www.spec.org/cpu2017/Docs/config.html#sysinfo notes_plat_sysinfo_040 = notes_plat_sysinfo_045 = From /proc/cpuinfo notes_plat_sysinfo_050 = model name : AMD EPYC 7662 64-Core Processor notes_plat_sysinfo_055 = 2 "physical id"s (chips) notes_plat_sysinfo_060 = 256 "processors" notes_plat_sysinfo_065 = cores, siblings (Caution: counting these is hw and system dependent. The following notes_plat_sysinfo_070 = excerpts from /proc/cpuinfo might not be reliable. Use with caution.) notes_plat_sysinfo_075 = cpu cores : 64 notes_plat_sysinfo_080 = siblings : 128 notes_plat_sysinfo_085 = physical 0: cores 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 notes_plat_sysinfo_090 = 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 notes_plat_sysinfo_095 = 53 54 55 56 57 58 59 60 61 62 63 notes_plat_sysinfo_100 = physical 1: cores 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 notes_plat_sysinfo_105 = 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 notes_plat_sysinfo_110 = 53 54 55 56 57 58 59 60 61 62 63 notes_plat_sysinfo_115 = notes_plat_sysinfo_120 = From lscpu: notes_plat_sysinfo_125 = Architecture: x86_64 notes_plat_sysinfo_130 = CPU op-mode(s): 32-bit, 64-bit notes_plat_sysinfo_135 = Byte Order: Little Endian notes_plat_sysinfo_140 = Address sizes: 43 bits physical, 48 bits virtual notes_plat_sysinfo_145 = CPU(s): 256 notes_plat_sysinfo_150 = On-line CPU(s) list: 0-255 notes_plat_sysinfo_155 = Thread(s) per core: 2 notes_plat_sysinfo_160 = Core(s) per socket: 64 notes_plat_sysinfo_165 = Socket(s): 2 notes_plat_sysinfo_170 = NUMA node(s): 32 notes_plat_sysinfo_175 = Vendor ID: AuthenticAMD notes_plat_sysinfo_180 = CPU family: 23 notes_plat_sysinfo_185 = Model: 49 notes_plat_sysinfo_190 = Model name: AMD EPYC 7662 64-Core Processor notes_plat_sysinfo_195 = Stepping: 0 
notes_plat_sysinfo_200 = CPU MHz: 1996.299 notes_plat_sysinfo_205 = BogoMIPS: 3992.59 notes_plat_sysinfo_210 = Virtualization: AMD-V notes_plat_sysinfo_215 = L1d cache: 32K notes_plat_sysinfo_220 = L1i cache: 32K notes_plat_sysinfo_225 = L2 cache: 512K notes_plat_sysinfo_230 = L3 cache: 16384K notes_plat_sysinfo_235 = NUMA node0 CPU(s): 0-3,128-131 notes_plat_sysinfo_240 = NUMA node1 CPU(s): 4-7,132-135 notes_plat_sysinfo_245 = NUMA node2 CPU(s): 8-11,136-139 notes_plat_sysinfo_250 = NUMA node3 CPU(s): 12-15,140-143 notes_plat_sysinfo_255 = NUMA node4 CPU(s): 16-19,144-147 notes_plat_sysinfo_260 = NUMA node5 CPU(s): 20-23,148-151 notes_plat_sysinfo_265 = NUMA node6 CPU(s): 24-27,152-155 notes_plat_sysinfo_270 = NUMA node7 CPU(s): 28-31,156-159 notes_plat_sysinfo_275 = NUMA node8 CPU(s): 32-35,160-163 notes_plat_sysinfo_280 = NUMA node9 CPU(s): 36-39,164-167 notes_plat_sysinfo_285 = NUMA node10 CPU(s): 40-43,168-171 notes_plat_sysinfo_290 = NUMA node11 CPU(s): 44-47,172-175 notes_plat_sysinfo_295 = NUMA node12 CPU(s): 48-51,176-179 notes_plat_sysinfo_300 = NUMA node13 CPU(s): 52-55,180-183 notes_plat_sysinfo_305 = NUMA node14 CPU(s): 56-59,184-187 notes_plat_sysinfo_310 = NUMA node15 CPU(s): 60-63,188-191 notes_plat_sysinfo_315 = NUMA node16 CPU(s): 64-67,192-195 notes_plat_sysinfo_320 = NUMA node17 CPU(s): 68-71,196-199 notes_plat_sysinfo_325 = NUMA node18 CPU(s): 72-75,200-203 notes_plat_sysinfo_330 = NUMA node19 CPU(s): 76-79,204-207 notes_plat_sysinfo_335 = NUMA node20 CPU(s): 80-83,208-211 notes_plat_sysinfo_340 = NUMA node21 CPU(s): 84-87,212-215 notes_plat_sysinfo_345 = NUMA node22 CPU(s): 88-91,216-219 notes_plat_sysinfo_350 = NUMA node23 CPU(s): 92-95,220-223 notes_plat_sysinfo_355 = NUMA node24 CPU(s): 96-99,224-227 notes_plat_sysinfo_360 = NUMA node25 CPU(s): 100-103,228-231 notes_plat_sysinfo_365 = NUMA node26 CPU(s): 104-107,232-235 notes_plat_sysinfo_370 = NUMA node27 CPU(s): 108-111,236-239 notes_plat_sysinfo_375 = NUMA node28 CPU(s): 112-115,240-243 
notes_plat_sysinfo_380 = NUMA node29 CPU(s): 116-119,244-247 notes_plat_sysinfo_385 = NUMA node30 CPU(s): 120-123,248-251 notes_plat_sysinfo_390 = NUMA node31 CPU(s): 124-127,252-255 notes_plat_sysinfo_395 = Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov notes_plat_sysinfo_400 = pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm notes_plat_sysinfo_405 = constant_tsc rep_good nopl xtopology nonstop_tsc cpuid extd_apicid aperfmperf pni notes_plat_sysinfo_410 = pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx notes_plat_sysinfo_415 = f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse notes_plat_sysinfo_420 = 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext notes_plat_sysinfo_425 = perfctr_l2 mwaitx cpb cat_l3 cdp_l3 hw_pstate sme ssbd sev ibrs ibpb stibp vmmcall notes_plat_sysinfo_430 = fsgsbase bmi1 avx2 smep bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni notes_plat_sysinfo_435 = xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local notes_plat_sysinfo_440 = clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean notes_plat_sysinfo_445 = flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif umip notes_plat_sysinfo_450 = rdpid overflow_recov succor smca notes_plat_sysinfo_455 = notes_plat_sysinfo_460 = /proc/cpuinfo cache data notes_plat_sysinfo_465 = cache size : 512 KB notes_plat_sysinfo_470 = notes_plat_sysinfo_475 = From numactl --hardware WARNING: a numactl 'node' might or might not correspond to a notes_plat_sysinfo_480 = physical chip. 
notes_plat_sysinfo_485 = available: 32 nodes (0-31) notes_plat_sysinfo_490 = node 0 cpus: 0 1 2 3 128 129 130 131 notes_plat_sysinfo_495 = node 0 size: 15548 MB notes_plat_sysinfo_500 = node 0 free: 15487 MB notes_plat_sysinfo_505 = node 1 cpus: 4 5 6 7 132 133 134 135 notes_plat_sysinfo_510 = node 1 size: 16126 MB notes_plat_sysinfo_515 = node 1 free: 16071 MB notes_plat_sysinfo_520 = node 2 cpus: 8 9 10 11 136 137 138 139 notes_plat_sysinfo_525 = node 2 size: 16126 MB notes_plat_sysinfo_530 = node 2 free: 16006 MB notes_plat_sysinfo_535 = node 3 cpus: 12 13 14 15 140 141 142 143 notes_plat_sysinfo_540 = node 3 size: 16125 MB notes_plat_sysinfo_545 = node 3 free: 16092 MB notes_plat_sysinfo_550 = node 4 cpus: 16 17 18 19 144 145 146 147 notes_plat_sysinfo_555 = node 4 size: 16126 MB notes_plat_sysinfo_560 = node 4 free: 16104 MB notes_plat_sysinfo_565 = node 5 cpus: 20 21 22 23 148 149 150 151 notes_plat_sysinfo_570 = node 5 size: 16126 MB notes_plat_sysinfo_575 = node 5 free: 16106 MB notes_plat_sysinfo_580 = node 6 cpus: 24 25 26 27 152 153 154 155 notes_plat_sysinfo_585 = node 6 size: 16126 MB notes_plat_sysinfo_590 = node 6 free: 16077 MB notes_plat_sysinfo_595 = node 7 cpus: 28 29 30 31 156 157 158 159 notes_plat_sysinfo_600 = node 7 size: 16125 MB notes_plat_sysinfo_605 = node 7 free: 16105 MB notes_plat_sysinfo_610 = node 8 cpus: 32 33 34 35 160 161 162 163 notes_plat_sysinfo_615 = node 8 size: 16126 MB notes_plat_sysinfo_620 = node 8 free: 16106 MB notes_plat_sysinfo_625 = node 9 cpus: 36 37 38 39 164 165 166 167 notes_plat_sysinfo_630 = node 9 size: 16126 MB notes_plat_sysinfo_635 = node 9 free: 16107 MB notes_plat_sysinfo_640 = node 10 cpus: 40 41 42 43 168 169 170 171 notes_plat_sysinfo_645 = node 10 size: 16126 MB notes_plat_sysinfo_650 = node 10 free: 16102 MB notes_plat_sysinfo_655 = node 11 cpus: 44 45 46 47 172 173 174 175 notes_plat_sysinfo_660 = node 11 size: 16125 MB notes_plat_sysinfo_665 = node 11 free: 16099 MB notes_plat_sysinfo_670 = node 
12 cpus: 48 49 50 51 176 177 178 179 notes_plat_sysinfo_675 = node 12 size: 16126 MB notes_plat_sysinfo_680 = node 12 free: 16087 MB notes_plat_sysinfo_685 = node 13 cpus: 52 53 54 55 180 181 182 183 notes_plat_sysinfo_690 = node 13 size: 16126 MB notes_plat_sysinfo_695 = node 13 free: 16107 MB notes_plat_sysinfo_700 = node 14 cpus: 56 57 58 59 184 185 186 187 notes_plat_sysinfo_705 = node 14 size: 16126 MB notes_plat_sysinfo_710 = node 14 free: 16107 MB notes_plat_sysinfo_715 = node 15 cpus: 60 61 62 63 188 189 190 191 notes_plat_sysinfo_720 = node 15 size: 16113 MB notes_plat_sysinfo_725 = node 15 free: 16095 MB notes_plat_sysinfo_730 = node 16 cpus: 64 65 66 67 192 193 194 195 notes_plat_sysinfo_735 = node 16 size: 16126 MB notes_plat_sysinfo_740 = node 16 free: 16098 MB notes_plat_sysinfo_745 = node 17 cpus: 68 69 70 71 196 197 198 199 notes_plat_sysinfo_750 = node 17 size: 16126 MB notes_plat_sysinfo_755 = node 17 free: 16105 MB notes_plat_sysinfo_760 = node 18 cpus: 72 73 74 75 200 201 202 203 notes_plat_sysinfo_765 = node 18 size: 16126 MB notes_plat_sysinfo_770 = node 18 free: 16106 MB notes_plat_sysinfo_775 = node 19 cpus: 76 77 78 79 204 205 206 207 notes_plat_sysinfo_780 = node 19 size: 16125 MB notes_plat_sysinfo_785 = node 19 free: 16104 MB notes_plat_sysinfo_790 = node 20 cpus: 80 81 82 83 208 209 210 211 notes_plat_sysinfo_795 = node 20 size: 16126 MB notes_plat_sysinfo_800 = node 20 free: 16103 MB notes_plat_sysinfo_805 = node 21 cpus: 84 85 86 87 212 213 214 215 notes_plat_sysinfo_810 = node 21 size: 16126 MB notes_plat_sysinfo_815 = node 21 free: 16104 MB notes_plat_sysinfo_820 = node 22 cpus: 88 89 90 91 216 217 218 219 notes_plat_sysinfo_825 = node 22 size: 16126 MB notes_plat_sysinfo_830 = node 22 free: 16107 MB notes_plat_sysinfo_835 = node 23 cpus: 92 93 94 95 220 221 222 223 notes_plat_sysinfo_840 = node 23 size: 16125 MB notes_plat_sysinfo_845 = node 23 free: 16106 MB notes_plat_sysinfo_850 = node 24 cpus: 96 97 98 99 224 225 226 227 
notes_plat_sysinfo_855 = node 24 size: 16126 MB notes_plat_sysinfo_860 = node 24 free: 16065 MB notes_plat_sysinfo_865 = node 25 cpus: 100 101 102 103 228 229 230 231 notes_plat_sysinfo_870 = node 25 size: 16126 MB notes_plat_sysinfo_875 = node 25 free: 15964 MB notes_plat_sysinfo_880 = node 26 cpus: 104 105 106 107 232 233 234 235 notes_plat_sysinfo_885 = node 26 size: 16126 MB notes_plat_sysinfo_890 = node 26 free: 16065 MB notes_plat_sysinfo_895 = node 27 cpus: 108 109 110 111 236 237 238 239 notes_plat_sysinfo_900 = node 27 size: 16125 MB notes_plat_sysinfo_905 = node 27 free: 16105 MB notes_plat_sysinfo_910 = node 28 cpus: 112 113 114 115 240 241 242 243 notes_plat_sysinfo_915 = node 28 size: 16126 MB notes_plat_sysinfo_920 = node 28 free: 16103 MB notes_plat_sysinfo_925 = node 29 cpus: 116 117 118 119 244 245 246 247 notes_plat_sysinfo_930 = node 29 size: 16126 MB notes_plat_sysinfo_935 = node 29 free: 16107 MB notes_plat_sysinfo_940 = node 30 cpus: 120 121 122 123 248 249 250 251 notes_plat_sysinfo_945 = node 30 size: 16126 MB notes_plat_sysinfo_950 = node 30 free: 16097 MB notes_plat_sysinfo_955 = node 31 cpus: 124 125 126 127 252 253 254 255 notes_plat_sysinfo_960 = node 31 size: 16093 MB notes_plat_sysinfo_965 = node 31 free: 16074 MB notes_plat_sysinfo_970 = node distances: notes_plat_sysinfo_975 = node 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 notes_plat_sysinfo_980 = 20 21 22 23 24 25 26 27 28 29 30 31 notes_plat_sysinfo_985 = 0: 10 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_990 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_995 = 1: 11 10 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1000 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1005 = 2: 11 11 10 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1010 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1015 = 3: 11 11 11 10 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 
notes_plat_sysinfo_1020 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1025 = 4: 11 11 11 11 10 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1030 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1035 = 5: 11 11 11 11 11 10 11 11 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1040 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1045 = 6: 11 11 11 11 11 11 10 11 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1050 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1055 = 7: 11 11 11 11 11 11 11 10 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1060 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1065 = 8: 11 11 11 11 11 11 11 11 10 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1070 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1075 = 9: 11 11 11 11 11 11 11 11 11 10 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1080 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1085 = 10: 11 11 11 11 11 11 11 11 11 11 10 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1090 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1095 = 11: 11 11 11 11 11 11 11 11 11 11 11 10 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1100 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1105 = 12: 11 11 11 11 11 11 11 11 11 11 11 11 10 11 11 11 11 11 11 11 notes_plat_sysinfo_1110 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1115 = 13: 11 11 11 11 11 11 11 11 11 11 11 11 11 10 11 11 11 11 11 11 notes_plat_sysinfo_1120 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1125 = 14: 11 11 11 11 11 11 11 11 11 11 11 11 11 11 10 11 11 11 11 11 notes_plat_sysinfo_1130 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1135 = 15: 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 10 11 11 11 11 notes_plat_sysinfo_1140 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1145 = 16: 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 10 11 11 11 notes_plat_sysinfo_1150 = 11 
11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1155 = 17: 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 10 11 11 notes_plat_sysinfo_1160 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1165 = 18: 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 10 11 notes_plat_sysinfo_1170 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1175 = 19: 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 10 notes_plat_sysinfo_1180 = 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1185 = 20: 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1190 = 10 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1195 = 21: 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1200 = 11 10 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1205 = 22: 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1210 = 11 11 10 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1215 = 23: 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1220 = 11 11 11 10 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1225 = 24: 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1230 = 11 11 11 11 10 11 11 11 11 11 11 11 notes_plat_sysinfo_1235 = 25: 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1240 = 11 11 11 11 11 10 11 11 11 11 11 11 notes_plat_sysinfo_1245 = 26: 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1250 = 11 11 11 11 11 11 10 11 11 11 11 11 notes_plat_sysinfo_1255 = 27: 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1260 = 11 11 11 11 11 11 11 10 11 11 11 11 notes_plat_sysinfo_1265 = 28: 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1270 = 11 11 11 11 11 11 11 11 10 11 11 11 notes_plat_sysinfo_1275 = 29: 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1280 = 11 11 11 11 11 11 11 11 11 
10 11 11 notes_plat_sysinfo_1285 = 30: 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1290 = 11 11 11 11 11 11 11 11 11 11 10 11 notes_plat_sysinfo_1295 = 31: 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 notes_plat_sysinfo_1300 = 11 11 11 11 11 11 11 11 11 11 11 10 notes_plat_sysinfo_1305 = notes_plat_sysinfo_1310 = From /proc/meminfo notes_plat_sysinfo_1315 = MemTotal: 527783152 kB notes_plat_sysinfo_1320 = HugePages_Total: 0 notes_plat_sysinfo_1325 = Hugepagesize: 2048 kB notes_plat_sysinfo_1330 = notes_plat_sysinfo_1335 = From /etc/*release* /etc/*version* notes_plat_sysinfo_1340 = os-release: notes_plat_sysinfo_1345 = NAME="SLES" notes_plat_sysinfo_1350 = VERSION="15-SP1" notes_plat_sysinfo_1355 = VERSION_ID="15.1" notes_plat_sysinfo_1360 = PRETTY_NAME="SUSE Linux Enterprise Server 15 SP1" notes_plat_sysinfo_1365 = ID="sles" notes_plat_sysinfo_1370 = ID_LIKE="suse" notes_plat_sysinfo_1375 = ANSI_COLOR="0;32" notes_plat_sysinfo_1380 = CPE_NAME="cpe:/o:suse:sles:15:sp1" notes_plat_sysinfo_1385 = notes_plat_sysinfo_1390 = uname -a: notes_plat_sysinfo_1395 = Linux linux-g3ob 4.12.14-195-default #1 SMP Tue May 7 10:55:11 UTC 2019 (8fba516) notes_plat_sysinfo_1400 = x86_64 x86_64 x86_64 GNU/Linux notes_plat_sysinfo_1405 = notes_plat_sysinfo_1410 = Kernel self-reported vulnerability status: notes_plat_sysinfo_1415 = notes_plat_sysinfo_1420 = CVE-2018-3620 (L1 Terminal Fault): Not affected notes_plat_sysinfo_1425 = Microarchitectural Data Sampling: Not affected notes_plat_sysinfo_1430 = CVE-2017-5754 (Meltdown): Not affected notes_plat_sysinfo_1435 = CVE-2018-3639 (Speculative Store Bypass): Mitigation: Speculative Store Bypass disabled notes_plat_sysinfo_1440 = via prctl and seccomp notes_plat_sysinfo_1445 = CVE-2017-5753 (Spectre variant 1): Mitigation: __user pointer sanitization notes_plat_sysinfo_1450 = CVE-2017-5715 (Spectre variant 2): Mitigation: Full AMD retpoline, IBPB: notes_plat_sysinfo_1455 = conditional, 
IBRS_FW, STIBP: conditional, RSB notes_plat_sysinfo_1460 = filling notes_plat_sysinfo_1465 = notes_plat_sysinfo_1470 = run-level 3 Nov 29 14:42 last=5 notes_plat_sysinfo_1475 = notes_plat_sysinfo_1480 = SPEC is set to: /root/cpu2017-1.1.0 notes_plat_sysinfo_1485 = Filesystem Type Size Used Avail Use% Mounted on notes_plat_sysinfo_1490 = /dev/sda2 xfs 440G 36G 405G 8% / notes_plat_sysinfo_1495 = notes_plat_sysinfo_1500 = From /sys/devices/virtual/dmi/id notes_plat_sysinfo_1505 = BIOS: Dell Inc. 1.2.6 11/21/2019 notes_plat_sysinfo_1510 = Vendor: Dell Inc. notes_plat_sysinfo_1515 = Product: PowerEdge R7525 notes_plat_sysinfo_1520 = Product Family: PowerEdge notes_plat_sysinfo_1525 = Serial: 1234567 notes_plat_sysinfo_1530 = notes_plat_sysinfo_1535 = Additional information from dmidecode follows. WARNING: Use caution when you interpret notes_plat_sysinfo_1540 = this section. The 'dmidecode' program reads system data which is "intended to allow notes_plat_sysinfo_1545 = hardware to be accurately determined", but the intent may not be met, as there are notes_plat_sysinfo_1550 = frequent changes to hardware, firmware, and the "DMTF SMBIOS" standard. 
notes_plat_sysinfo_1555 = Memory: notes_plat_sysinfo_1560 = 7x 802C80B3802C 36ASF4G72PZ-3G2E2 32 GB 2 rank 3200 notes_plat_sysinfo_1565 = 8x 802C869D802C 36ASF4G72PZ-3G2E2 32 GB 2 rank 3200 notes_plat_sysinfo_1570 = 1x 80AD80B380AD HMA84GR7CJR4N-XN 32 GB 2 rank 3200 notes_plat_sysinfo_1575 = 16x Not Specified Not Specified notes_plat_sysinfo_1580 = notes_plat_sysinfo_1585 = (End of data from sysinfo program) hw_cpu_name = AMD EPYC 7662 hw_disk = 440 GB (1 x 440 GB, xfs on /dev/sda2) hw_memory001 = 512 GB (16 x 32 GB 2Rx4 PC4-3200AA-R) hw_memory002 = running at 3200 hw_nchips = 2 prepared_by = root (is never output, only tags rawfile) sw_file = xfs sw_os001 = SUSE Linux Enterprise Server 15 SP1 sw_state = Run level 3 (multi-user with network services) # End of settings added by sysinfo_program 654.roms_s: # The following setting was inserted automatically as a result of # post-run basepeak application. basepeak = 1 649.fotonik3d_s: # The following setting was inserted automatically as a result of # post-run basepeak application. basepeak = 1 638.imagick_s: # The following setting was inserted automatically as a result of # post-run basepeak application. basepeak = 1 607.cactuBSSN_s: # The following setting was inserted automatically as a result of # post-run basepeak application. basepeak = 1 603.bwaves_s: # The following setting was inserted automatically as a result of # post-run basepeak application. basepeak = 1