new file mode 100644
@@ -0,0 +1,28 @@
+NAME = "NAS Parallel Benchmarks"
+AUTHOR = "Cao, Chen <kcao@redhat.com>"
+TEST_TYPE = "CLIENT"
+TEST_CLASS = "HARDWARE"
+TEST_CATEGORY = "BENCHMARK"
+TIME = "MEDIUM"
+DOC = """\
+Runs the NAS Parallel Benchmarks (NPB), OpenMP implementation.
+
+See http://www.nas.nasa.gov/Software/NPB/
+"""
+
+# Supported tests (benchmarks):
+# bt.A bt.B bt.C bt.D bt.E bt.S bt.W
+# cg.A cg.B cg.C cg.S cg.W
+# dc.A dc.B dc.S dc.W
+# ep.A ep.B ep.C ep.D ep.E ep.S ep.W
+# ft.A ft.B ft.S ft.W
+# is.A is.B is.C is.S is.W
+# lu.A lu.B lu.C lu.S lu.W
+# mg.A mg.B mg.S mg.W
+# sp.A sp.B sp.C sp.D sp.E sp.S sp.W
+# ua.A ua.B ua.C ua.S ua.W
+#
+# Please refer to npb.py for more information about
+# the arguments.
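+#
+# For example, a hypothetical run of only the small sample (S) classes
+# of two benchmarks could look like:
+#     job.run_test(url='npb', tests='ep.S is.S')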
+job.run_test(url='npb', tests='ep.A ep.B')
+
@@ -96,6 +96,9 @@ variants:
# control file and set this timeout appropriately.
test_timeout = 3900
test_control_file = cerberus.control
+ - npb:
+ test_name = npb
+ test_control_file = npb.control
- linux_s3: install setup
type = linux_s3
@@ -639,6 +642,7 @@ linux_s3:
variants:
- @up:
+ no autotest.npb
- smp2:
extra_params += " -smp 2"
new file mode 100644
@@ -0,0 +1,28 @@
+NAME = "NAS Parallel Benchmarks"
+AUTHOR = "Cao, Chen <kcao@redhat.com>"
+TEST_TYPE = "CLIENT"
+TEST_CLASS = "HARDWARE"
+TEST_CATEGORY = "BENCHMARK"
+TIME = "MEDIUM"
+DOC = """\
+Runs the NAS Parallel Benchmarks (NPB), OpenMP implementation.
+
+See http://www.nas.nasa.gov/Software/NPB/
+"""
+
+# Supported tests (benchmarks):
+# bt.A bt.B bt.C bt.D bt.E bt.S bt.W
+# cg.A cg.B cg.C cg.S cg.W
+# dc.A dc.B dc.S dc.W
+# ep.A ep.B ep.C ep.D ep.E ep.S ep.W
+# ft.A ft.B ft.S ft.W
+# is.A is.B is.C is.S is.W
+# lu.A lu.B lu.C lu.S lu.W
+# mg.A mg.B mg.S mg.W
+# sp.A sp.B sp.C sp.D sp.E sp.S sp.W
+# ua.A ua.B ua.C ua.S ua.W
+#
+# Please refer to npb.py for more information about
+# the arguments.
+job.run_test(url='npb', tests='ep.A ep.B bt.S bt.W')
+
new file mode 100644
@@ -0,0 +1,233 @@
+diff --git a/NPB3.3-OMP/config/make.def b/NPB3.3-OMP/config/make.def
+new file mode 100644
+index 0000000..afffe7d
+--- /dev/null
++++ b/NPB3.3-OMP/config/make.def
+@@ -0,0 +1,161 @@
++#---------------------------------------------------------------------------
++#
++# SITE- AND/OR PLATFORM-SPECIFIC DEFINITIONS.
++#
++#---------------------------------------------------------------------------
++
++#---------------------------------------------------------------------------
++# Items in this file will need to be changed for each platform.
++#---------------------------------------------------------------------------
++
++#---------------------------------------------------------------------------
++# Parallel Fortran:
++#
++# For CG, EP, FT, MG, LU, SP, BT and UA, which are in Fortran, the following
++# must be defined:
++#
++# F77 - Fortran compiler
++# FFLAGS - Fortran compilation arguments
++# F_INC - any -I arguments required for compiling Fortran
++# FLINK - Fortran linker
++# FLINKFLAGS - Fortran linker arguments
++# F_LIB - any -L and -l arguments required for linking Fortran
++#
++# compilations are done with $(F77) $(F_INC) $(FFLAGS) or
++# $(F77) $(FFLAGS)
++# linking is done with $(FLINK) $(F_LIB) $(FLINKFLAGS)
++#---------------------------------------------------------------------------
++
++#---------------------------------------------------------------------------
++# This is the fortran compiler used for Fortran programs
++#---------------------------------------------------------------------------
++F77 = gfortran
++# This links fortran programs; usually the same as ${F77}
++FLINK = $(F77)
++
++#---------------------------------------------------------------------------
++# These macros are passed to the linker
++#---------------------------------------------------------------------------
++F_LIB =
++
++#---------------------------------------------------------------------------
++# These macros are passed to the compiler
++#---------------------------------------------------------------------------
++F_INC =
++
++#---------------------------------------------------------------------------
++# Global *compile time* flags for Fortran programs
++#---------------------------------------------------------------------------
++FFLAGS = -O -fopenmp
++
++#---------------------------------------------------------------------------
++# Global *link time* flags. Flags for increasing maximum executable
++# size usually go here.
++#---------------------------------------------------------------------------
++FLINKFLAGS = -O -fopenmp
++
++
++#---------------------------------------------------------------------------
++# Parallel C:
++#
++# For IS and DC, which are in C, the following must be defined:
++#
++# CC - C compiler
++# CFLAGS - C compilation arguments
++# C_INC - any -I arguments required for compiling C
++# CLINK - C linker
++# CLINKFLAGS - C linker flags
++# C_LIB - any -L and -l arguments required for linking C
++#
++# compilations are done with $(CC) $(C_INC) $(CFLAGS) or
++# $(CC) $(CFLAGS)
++# linking is done with $(CLINK) $(C_LIB) $(CLINKFLAGS)
++#---------------------------------------------------------------------------
++
++#---------------------------------------------------------------------------
++# This is the C compiler used for C programs
++#---------------------------------------------------------------------------
++CC = cc
++# This links C programs; usually the same as ${CC}
++CLINK = $(CC)
++
++#---------------------------------------------------------------------------
++# These macros are passed to the linker
++#---------------------------------------------------------------------------
++C_LIB = -lm
++
++#---------------------------------------------------------------------------
++# These macros are passed to the compiler
++#---------------------------------------------------------------------------
++C_INC =
++
++#---------------------------------------------------------------------------
++# Global *compile time* flags for C programs
++# DC inspects the following flags (preceded by "-D"):
++#
++# IN_CORE - computes all views and checksums in main memory (if there is
++# enough memory)
++#
++# VIEW_FILE_OUTPUT - forces DC to write the generated views to disk
++#
++# OPTIMIZATION - turns on some nonstandard DC optimizations
++#
++# _FILE_OFFSET_BITS=64
++# _LARGEFILE64_SOURCE - are standard compiler flags which allow working
++#                       with files larger than 2GB.
++#---------------------------------------------------------------------------
++CFLAGS = -O
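++# For example (hypothetical, adjust to the target system), large-file
++# support for DC could be enabled with:
++# CFLAGS = -O -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE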
++
++#---------------------------------------------------------------------------
++# Global *link time* flags. Flags for increasing maximum executable
++# size usually go here.
++#---------------------------------------------------------------------------
++CLINKFLAGS = -O
++
++
++#---------------------------------------------------------------------------
++# Utilities C:
++#
++# This is the C compiler used to compile C utilities. Flags required by
++# this compiler go here also; typically there are few flags required; hence
++# there are no separate macros provided for such flags.
++#---------------------------------------------------------------------------
++UCC = cc
++
++
++#---------------------------------------------------------------------------
++# Destination of executables, relative to subdirs of the main directory.
++#---------------------------------------------------------------------------
++BINDIR = ../bin
++
++
++#---------------------------------------------------------------------------
++# The variable RAND controls which random number generator
++# is used. It is described in detail in README.install.
++# Use "randi8" unless there is a reason to use another one.
++# Other allowed values are "randi8_safe", "randdp" and "randdpvec"
++#---------------------------------------------------------------------------
++RAND = randi8
++# The following is highly reliable but may be slow:
++# RAND = randdp
++
++
++#---------------------------------------------------------------------------
++# The variable WTIME is the name of the wtime source code module in the
++# common directory.
++# For most machines, use wtime.c
++# For SGI power challenge: use wtime_sgi64.c
++#---------------------------------------------------------------------------
++WTIME = wtime.c
++
++
++#---------------------------------------------------------------------------
++# Enable if either Cray (not Cray-X1) or IBM:
++# (no such flag for most machines: see common/wtime.h)
++# This is used by the C compiler to pass the machine name to common/wtime.h,
++# where the C/Fortran binding interface format is determined
++#---------------------------------------------------------------------------
++# MACHINE = -DCRAY
++# MACHINE = -DIBM
++
++
+diff --git a/NPB3.3-OMP/config/suite.def b/NPB3.3-OMP/config/suite.def
+new file mode 100644
+index 0000000..7342195
+--- /dev/null
++++ b/NPB3.3-OMP/config/suite.def
+@@ -0,0 +1,60 @@
++# config/suite.def
++# This file is used to build several benchmarks with a single command.
++# Typing "make suite" in the main directory will build all the benchmarks
++# specified in this file.
++# Each line of this file contains a benchmark name and the class.
++# The name is one of "cg", "is", "dc", "ep", "mg", "ft", "sp",
++# "bt", "lu", and "ua".
++# The class is one of "S", "W", "A" through "E"
++# (except that there are no classes C, D, E for DC and no class E for IS and UA).
++# No blank lines.
++# The following builds the benchmark/class combinations used by this test.
++ft A
++ft B
++ft S
++ft W
++mg A
++mg B
++mg S
++mg W
++sp A
++sp B
++sp C
++sp S
++sp W
++lu A
++lu B
++lu C
++lu S
++lu W
++bt A
++bt B
++bt C
++bt S
++bt W
++is A
++is B
++is C
++is S
++is W
++ep A
++ep B
++ep C
++ep D
++ep E
++ep S
++ep W
++cg A
++cg B
++cg C
++cg S
++cg W
++ua A
++ua B
++ua C
++ua S
++ua W
++dc A
++dc B
++dc S
++dc W
new file mode 100644
@@ -0,0 +1,145 @@
+import os, logging, re
+from autotest_lib.client.bin import test, utils
+from autotest_lib.client.common_lib import error
+
+class npb(test.test):
+ """
+    This module runs the NAS Parallel Benchmarks on the client machine.
+
+    @note: Since we use gfortran to compile these benchmarks, this test might
+           not be able to run on older operating systems.
+ @see: http://www.nas.nasa.gov/Resources/Software/npb.html
+ """
+ version = 1
+ def initialize(self, tests=''):
+ # Initialize failure counter
+ self.n_fail = 0
+ # Get the parameters for run_once()
+ self.tests = tests
+        # self.ratio is 1 divided by the number of CPUs on this system
+        self.ratio = 1.0 / utils.count_cpus()
+        logging.debug('Ratio (1/n_cpus) found for this system: %s',
+                      self.ratio)
+
+
+ def setup(self, tarball='NPB3.3.tar.gz'):
+ tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
+ utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
+        # Prepare the makefile, then build the benchmark suite.
+ utils.system('patch -p1 < ../enable-all-tests.patch')
+ utils.system('cd NPB3.3-OMP && make suite')
+
+
+ def run_once(self):
+ """
+        Run each benchmark twice, with a different number of threads.
+
+        A sanity check is made on each benchmark executed: the ratio
+        between the durations,
+            time_ratio = time_one_thrd / time_full_thrds
+        has to fall inside an envelope:
+            upper_bound = full_thrds * (1 + (1/n_cpus))
+            lower_bound = full_thrds * (1 - (1/n_cpus))
+
+        Otherwise, the failure counter is incremented (this test might be
+        running under a virtual machine, and a sanity check failure might
+        indicate bugs in the SMP implementation).
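+
+        For example, on a hypothetical 4-CPU machine, full_thrds = 4.0 and
+        ratio = 0.25, so an acceptable time_ratio must fall between
+        lower_bound = 3.0 and upper_bound = 5.0.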
+ """
+ os.chdir(self.srcdir)
+
+ # get the tests to run
+ test_list = self.tests.split()
+
+        if not test_list:
+            raise error.TestError('No tests (benchmarks) provided. Exit.')
+
+        for itest in test_list:
+            itest_cmd = os.path.join('NPB3.3-OMP/bin/', itest)
+            try:
+                result = utils.run(itest_cmd)
+            except error.CmdError as e:
+                # utils.run raises error.CmdError on non-zero exit status
+                logging.error('NPB benchmark %s has failed. Output: %s',
+                              itest_cmd, e)
+                self.n_fail += 1
+                continue
+            logging.debug(result.stdout)
+
+            # Get the number of threads that the test ran with
+            # (which is supposed to be equal to the number of system cores)
+            m = re.search(r'Total threads\s*=\s*(.*)\n', result.stdout)
+
+            # Gather benchmark results
+            ts = re.search(r'Time in seconds\s*=\s*(.*)\n', result.stdout)
+            mt = re.search(r'Mop/s total\s*=\s*(.*)\n', result.stdout)
+            mp = re.search(r'Mop/s/thread\s*=\s*(.*)\n', result.stdout)
+
+            if not (m and ts and mt and mp):
+                logging.error('Could not parse the output of %s', itest_cmd)
+                self.n_fail += 1
+                continue
+
+            time_seconds = float(ts.groups()[0])
+            mops_total = float(mt.groups()[0])
+            mops_per_thread = float(mp.groups()[0])
+
+ logging.info('Test: %s', itest_cmd)
+ logging.info('Time (s): %s', time_seconds)
+ logging.info('Total operations executed (mops/s): %s', mops_total)
+ logging.info('Total operations per thread (mops/s/thread): %s',
+ mops_per_thread)
+
+            self.write_test_keyval({'test': itest_cmd,
+                                    'time_seconds': time_seconds,
+                                    'mops_total': mops_total,
+                                    'mops_per_thread': mops_per_thread})
+
+            # An extra sanity check comes in handy
+            if int(m.groups()[0]) != utils.count_cpus():
+                raise error.TestError("NPB test suite evaluated the number "
+                                      "of threads incorrectly: system appears "
+                                      "to have %s cores, but %s threads were "
+                                      "executed." %
+                                      (utils.count_cpus(), m.groups()[0]))
+
+            # We will use this integer with floating point vars later.
+            full_thrds = float(m.groups()[0])
+
+            # Duration of the full-thread run, already parsed above.
+            time_full_thrds = time_seconds
+
+            # Repeat the execution with a single thread.
+            itest_single_cmd = 'OMP_NUM_THREADS=1 ' + itest_cmd
+            try:
+                itest_single = utils.run(itest_single_cmd)
+            except error.CmdError as e:
+                logging.error('NPB benchmark single thread %s has failed. '
+                              'Output: %s', itest_single_cmd, e)
+                self.n_fail += 1
+                continue
+
+            m = re.search(r'Time in seconds\s*=\s*(.*)\n', itest_single.stdout)
+            if m is None:
+                logging.error('Could not parse the output of %s',
+                              itest_single_cmd)
+                self.n_fail += 1
+                continue
+            time_one_thrd = float(m.groups()[0])
+
+ # check durations
+ ratio = self.ratio
+            time_ratio = time_one_thrd / time_full_thrds
+ upper_bound = full_thrds * (1 + ratio)
+ lower_bound = full_thrds * (1 - ratio)
+ logging.debug('Time ratio for %s: %s', itest_cmd, time_ratio)
+ logging.debug('Upper bound: %s', upper_bound)
+ logging.debug('Lower bound: %s', lower_bound)
+
+ violates_upper_bound = time_ratio > upper_bound
+ violates_lower_bound = time_ratio < lower_bound
+ if violates_upper_bound or violates_lower_bound:
+                logging.error('NPB benchmark %s failed sanity check '
+                              '- time ratio outside bounds', itest_cmd)
+ self.n_fail += 1
+ else:
+                logging.debug('NPB benchmark %s sanity check PASS',
+                              itest_cmd)
+
+
+ def cleanup(self):
+ """
+ Raise TestError if failures were detected during test execution.
+ """
+ if self.n_fail != 0:
+ raise error.TestError('NPB test failed.')
+ else:
+ logging.info('NPB test passed.')