#!/usr/local/bin/python -O

""" A Python Benchmark Suite

"""
#
# Note: Please keep this module compatible with Python 1.5.2.
#
# Tests may include features in later Python versions, but these
# should then be embedded in try-except clauses in the configuration
# module Setup.py.
#

# pybench Copyright
__copyright__ = """\
Copyright (c), 1997-2006, Marc-Andre Lemburg (mal@lemburg.com)
Copyright (c), 2000-2006, eGenix.com Software GmbH (info@egenix.com)

                   All Rights Reserved.

Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby
granted, provided that the above copyright notice appear in all copies
and that both that copyright notice and this permission notice appear
in supporting documentation or portions thereof, including
modifications, that you make.

THE AUTHOR MARC-ANDRE LEMBURG DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""

# Note: re is used in main() for the -t option; importing it here
# avoids relying on it leaking in via the star-import below.
import re, sys, time, operator, string, platform
from CommandLine import *

try:
    import cPickle
    pickle = cPickle
except ImportError:
    import pickle

# Version number; version history: see README file !
__version__ = '2.0'

### Constants

# Second fractions
MILLI_SECONDS = 1e3
MICRO_SECONDS = 1e6

# Percent unit
PERCENT = 100

# Horizontal line length
LINE = 79

# Minimum test run-time
MIN_TEST_RUNTIME = 1e-3

# Number of calibration runs to use for calibrating the tests
CALIBRATION_RUNS = 20

# Number of calibration loops to run for each calibration run
CALIBRATION_LOOPS = 20

# Allow skipping calibration ?
ALLOW_SKIPPING_CALIBRATION = 1

# Timer types
TIMER_TIME_TIME = 'time.time'
TIMER_TIME_CLOCK = 'time.clock'
TIMER_SYSTIMES_PROCESSTIME = 'systimes.processtime'

# Choose platform default timer
if sys.platform[:3] == 'win':
    # On WinXP this has 2.5ms resolution
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_CLOCK
else:
    # On Linux this has 1ms resolution
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_TIME

# Print debug information ?
_debug = 0

### Helpers

def get_timer(timertype):

    if timertype == TIMER_TIME_TIME:
        return time.time
    elif timertype == TIMER_TIME_CLOCK:
        return time.clock
    elif timertype == TIMER_SYSTIMES_PROCESSTIME:
        import systimes
        return systimes.processtime
    else:
        raise TypeError('unknown timer type: %s' % timertype)

def get_machine_details():

    if _debug:
        print 'Getting machine details...'
    buildno, builddate = platform.python_build()
    python = platform.python_version()
    try:
        unichr(100000)
    except ValueError:
        # UCS2 build (standard)
        unicode = 'UCS2'
    except NameError:
        unicode = None
    else:
        # UCS4 build (most recent Linux distros)
        unicode = 'UCS4'
    bits, linkage = platform.architecture()
    return {
        'platform': platform.platform(),
        'processor': platform.processor(),
        'executable': sys.executable,
        'implementation': getattr(platform, 'python_implementation',
                                  lambda:'n/a')(),
        'python': python,
        'compiler': platform.python_compiler(),
        'buildno': buildno,
        'builddate': builddate,
        'unicode': unicode,
        'bits': bits,
        }

def print_machine_details(d, indent=''):

    l = ['Machine Details:',
         '   Platform ID:    %s' % d.get('platform', 'n/a'),
         '   Processor:      %s' % d.get('processor', 'n/a'),
         '',
         'Python:',
         '   Implementation: %s' % d.get('implementation', 'n/a'),
         '   Executable:     %s' % d.get('executable', 'n/a'),
         '   Version:        %s' % d.get('python', 'n/a'),
         '   Compiler:       %s' % d.get('compiler', 'n/a'),
         '   Bits:           %s' % d.get('bits', 'n/a'),
         '   Build:          %s (#%s)' % (d.get('builddate', 'n/a'),
                                          d.get('buildno', 'n/a')),
         '   Unicode:        %s' % d.get('unicode', 'n/a'),
         ]
    print indent + string.join(l, '\n' + indent) + '\n'

### Test baseclass

class Test:

    """ All tests must have this class as a baseclass. It provides
        the necessary interface to the benchmark machinery.

        The tests must set .rounds to a value high enough to let the
        test run between 20-50 seconds. This is needed because
        clock()-timing only gives rather inaccurate values (on Linux,
        for example, it is accurate to a few hundredths of a
        second). If you don't want to wait that long, use a warp
        factor larger than 1.

        It is also important to set the .operations variable to a
        value representing the number of "virtual operations" done per
        call of .run().

        If you change a test in some way, don't forget to increase
        its version number.

    """

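    # Illustrative sketch only (not part of the original suite): a
    # concrete test could look like the hypothetical subclass below.
    # Real tests live in the modules imported via Setup.py; the class
    # name and operation counts here are made up.
    #
    #     class TupleSlicingExample(Test):
    #         version = 2.0
    #         operations = 3          # three slice operations per round
    #         rounds = 100000
    #         def test(self):
    #             t = tuple(range(100))
    #             for i in xrange(self.rounds):
    #                 s = t[10:20]
    #                 s = t[:50]
    #                 s = t[50:]
    #         def calibrate(self):
    #             # Same setup and loop, minus the measured operations
    #             t = tuple(range(100))
    #             for i in xrange(self.rounds):
    #                 pass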
       
    ### Instance variables that each test should override

    # Version number of the test as float (x.yy); this is important
    # for comparisons of benchmark runs - tests with unequal version
    # number will not get compared.
    version = 2.0

    # The number of abstract operations done in each round of the
    # test. An operation is the basic unit of what you want to
    # measure. The benchmark will output the amount of run-time per
    # operation. Note that in order to raise the measured timings
    # significantly above noise level, it is often required to repeat
    # sets of operations more than once per test round. The measured
    # overhead per test round should be less than 1 second.
    operations = 1

    # Number of rounds to execute per test run. This should be
    # adjusted to a figure that results in a test run-time of between
    # 1-2 seconds.
    rounds = 100000

    ### Internal variables

    # Mark this class as implementing a test
    is_a_test = 1

    # Last timing: (real, run, overhead)
    last_timing = (0.0, 0.0, 0.0)

    # Warp factor to use for this test
    warp = 1

    # Number of calibration runs to use
    calibration_runs = CALIBRATION_RUNS

    # List of calibration timings
    overhead_times = None

    # List of test run timings
    times = []

    # Timer used for the benchmark
    timer = TIMER_PLATFORM_DEFAULT

    def __init__(self, warp=None, calibration_runs=None, timer=None):

        # Set parameters
        if warp is not None:
            self.rounds = int(self.rounds / warp)
            if self.rounds == 0:
                raise ValueError('warp factor set too high')
            self.warp = warp
        if calibration_runs is not None:
            if (not ALLOW_SKIPPING_CALIBRATION and
                calibration_runs < 1):
                raise ValueError('at least one calibration run is required')
            self.calibration_runs = calibration_runs
        if timer is not None:
            self.timer = timer

        # Init variables
        self.times = []
        self.overhead_times = []

        # We want these to be in the instance dict, so that pickle
        # saves them
        self.version = self.version
        self.operations = self.operations
        self.rounds = self.rounds

    def get_timer(self):

        """ Return the timer function to use for the test.

        """
        return get_timer(self.timer)

    def compatible(self, other):

        """ Return 1/0 depending on whether the test is compatible
            with the other Test instance or not.

        """
        if self.version != other.version:
            return 0
        if self.rounds != other.rounds:
            return 0
        return 1

    def calibrate_test(self):

        if self.calibration_runs == 0:
            self.overhead_times = [0.0]
            return

        calibrate = self.calibrate
        timer = self.get_timer()
        calibration_loops = range(CALIBRATION_LOOPS)

        # Time the calibration loop overhead
        prep_times = []
        for i in range(self.calibration_runs):
            t = timer()
            for i in calibration_loops:
                pass
            t = timer() - t
            prep_times.append(t)
        min_prep_time = min(prep_times)
        if _debug:
            print
            print 'Calib. prep time     = %.6fms' % (
                min_prep_time * MILLI_SECONDS)

        # Time the calibration runs (doing CALIBRATION_LOOPS loops of
        # .calibrate() method calls each)
        for i in range(self.calibration_runs):
            t = timer()
            for i in calibration_loops:
                calibrate()
            t = timer() - t
            self.overhead_times.append(t / CALIBRATION_LOOPS
                                       - min_prep_time)

        # Check the measured times
        min_overhead = min(self.overhead_times)
        max_overhead = max(self.overhead_times)
        if _debug:
            print 'Calib. overhead time = %.6fms' % (
                min_overhead * MILLI_SECONDS)
        if min_overhead < 0.0:
            raise ValueError('calibration setup did not work')
        if max_overhead - min_overhead > 0.1:
            raise ValueError(
                'overhead calibration timing range too inaccurate: '
                '%r - %r' % (min_overhead, max_overhead))

    def run(self):

        """ Run the test in two phases: first calibrate, then
            do the actual test. Be careful to keep the calibration
            timing low w/r to the test timing.

        """
        test = self.test
        timer = self.get_timer()

        # Get calibration
        min_overhead = min(self.overhead_times)

        # Test run
        t = timer()
        test()
        t = timer() - t
        if t < MIN_TEST_RUNTIME:
            raise ValueError('warp factor too high: '
                             'test times are < %.0fms' %
                             (MIN_TEST_RUNTIME * MILLI_SECONDS))
        eff_time = t - min_overhead
        if eff_time < 0:
            raise ValueError('wrong calibration')
        self.last_timing = (eff_time, t, min_overhead)
        self.times.append(eff_time)

    def calibrate(self):

        """ Calibrate the test.

            This method should execute everything that is needed to
            set up and run the test - except for the actual operations
            that you intend to measure. pybench uses this method to
            measure the test implementation overhead.

        """
        return

    def test(self):

        """ Run the test.

            The test needs to run self.rounds rounds, executing
            self.operations operations each.

        """
        return

    def stat(self):

        """ Return test run statistics as tuple:

            (minimum run time,
             average run time,
             total run time,
             average time per operation,
             minimum overhead time)

        """
        runs = len(self.times)
        if runs == 0:
            return 0.0, 0.0, 0.0, 0.0, 0.0
        min_time = min(self.times)
        total_time = reduce(operator.add, self.times, 0.0)
        avg_time = total_time / float(runs)
        operation_avg = total_time / float(runs
                                           * self.rounds
                                           * self.operations)
        if self.overhead_times:
            min_overhead = min(self.overhead_times)
        else:
            min_overhead = self.last_timing[2]
        return min_time, avg_time, total_time, operation_avg, min_overhead

### Load Setup

# This has to be done after the definition of the Test class, since
# the Setup module will import subclasses using this class.

import Setup

### Benchmark base class

class Benchmark:

    # Name of the benchmark
    name = ''

    # Number of benchmark rounds to run
    rounds = 1

    # Warp factor used to run the tests
    warp = 1

    # Average benchmark round time
    roundtime = 0

    # Benchmark version number as float x.yy
    version = 2.0

    # Produce verbose output ?
    verbose = 0

    # Dictionary with the machine details
    machine_details = None

    # Timer used for the benchmark
    timer = TIMER_PLATFORM_DEFAULT

    # Number of calibration runs to use; .compatible() and
    # .load_tests() reference this attribute even when no value is
    # passed to the constructor, so a class-level default is needed
    calibration_runs = CALIBRATION_RUNS

    def __init__(self, name, verbose=None, timer=None, warp=None,
                 calibration_runs=None):

        if name:
            self.name = name
        else:
            self.name = '%04i-%02i-%02i %02i:%02i:%02i' % \
                        (time.localtime(time.time())[:6])
        if verbose is not None:
            self.verbose = verbose
        if timer is not None:
            self.timer = timer
        if warp is not None:
            self.warp = warp
        if calibration_runs is not None:
            self.calibration_runs = calibration_runs

        # Init vars
        self.tests = {}
        if _debug:
            print 'Getting machine details...'
        self.machine_details = get_machine_details()

        # Make .version an instance attribute to have it saved in the
        # Benchmark pickle
        self.version = self.version

    def get_timer(self):

        """ Return the timer function to use for the test.

        """
        return get_timer(self.timer)

    def compatible(self, other):

        """ Return 1/0 depending on whether the benchmark is
            compatible with the other Benchmark instance or not.

        """
        if self.version != other.version:
            return 0
        if (self.machine_details == other.machine_details and
            self.timer != other.timer):
            return 0
        if (self.calibration_runs == 0 and
            other.calibration_runs != 0):
            return 0
        if (self.calibration_runs != 0 and
            other.calibration_runs == 0):
            return 0
        return 1

    def load_tests(self, setupmod, limitnames=None):

        # Add tests
        if self.verbose:
            print 'Searching for tests ...'
            print '--------------------------------------'
        for testclass in setupmod.__dict__.values():
            if not hasattr(testclass, 'is_a_test'):
                continue
            name = testclass.__name__
            if name == 'Test':
                continue
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            self.tests[name] = testclass(
                warp=self.warp,
                calibration_runs=self.calibration_runs,
                timer=self.timer)
        l = self.tests.keys()
        l.sort()
        if self.verbose:
            for name in l:
                print '  %s' % name
            print '--------------------------------------'
            print '  %i tests found' % len(l)
            print

    def calibrate(self):

        print 'Calibrating tests. Please wait...',
        sys.stdout.flush()
        if self.verbose:
            print
            print
            print 'Test                              min      max'
            print '-' * LINE
        tests = self.tests.items()
        tests.sort()
        for i in range(len(tests)):
            name, test = tests[i]
            test.calibrate_test()
            if self.verbose:
                print '%30s:  %6.3fms  %6.3fms' % \
                      (name,
                       min(test.overhead_times) * MILLI_SECONDS,
                       max(test.overhead_times) * MILLI_SECONDS)
        if self.verbose:
            print
            print 'Done with the calibration.'
        else:
            print 'done.'
        print

    def run(self):

        tests = self.tests.items()
        tests.sort()
        timer = self.get_timer()
        print 'Running %i round(s) of the suite at warp factor %i:' % \
              (self.rounds, self.warp)
        print
        self.roundtimes = []
        for i in range(self.rounds):
            if self.verbose:
                print ' Round %-25i  effective   absolute  overhead' % (i+1)
            total_eff_time = 0.0
            for j in range(len(tests)):
                name, test = tests[j]
                if self.verbose:
                    print '%30s:' % name,
                test.run()
                (eff_time, abs_time, min_overhead) = test.last_timing
                total_eff_time = total_eff_time + eff_time
                if self.verbose:
                    print '    %5.0fms    %5.0fms %7.3fms' % \
                          (eff_time * MILLI_SECONDS,
                           abs_time * MILLI_SECONDS,
                           min_overhead * MILLI_SECONDS)
            self.roundtimes.append(total_eff_time)
            if self.verbose:
                print ('                   '
                       '               ------------------------------')
                print ('                   '
                       '     Totals:    %6.0fms' %
                       (total_eff_time * MILLI_SECONDS))
                print
            else:
                print '* Round %i done in %.3f seconds.' % (i+1,
                                                            total_eff_time)
        print

    def stat(self):

        """ Return benchmark run statistics as tuple:

            (minimum round time,
             average round time,
             maximum round time)

            XXX Currently not used, since the benchmark computes test
                statistics across all rounds.

        """
        runs = len(self.roundtimes)
        if runs == 0:
            return 0.0, 0.0, 0.0
        min_time = min(self.roundtimes)
        total_time = reduce(operator.add, self.roundtimes, 0.0)
        avg_time = total_time / float(runs)
        max_time = max(self.roundtimes)
        return (min_time, avg_time, max_time)

    def print_header(self, title='Benchmark'):

        print '-' * LINE
        print '%s: %s' % (title, self.name)
        print '-' * LINE
        print
        print '    Rounds: %s' % self.rounds
        print '    Warp:   %s' % self.warp
        print '    Timer:  %s' % self.timer
        print
        if self.machine_details:
            print_machine_details(self.machine_details, indent='    ')
            print

    def print_benchmark(self, hidenoise=0, limitnames=None):

        print ('Test                          '
               '   minimum  average  operation  overhead')
        print '-' * LINE
        tests = self.tests.items()
        tests.sort()
        total_min_time = 0.0
        total_avg_time = 0.0
        for name, test in tests:
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            (min_time,
             avg_time,
             total_time,
             op_avg,
             min_overhead) = test.stat()
            total_min_time = total_min_time + min_time
            total_avg_time = total_avg_time + avg_time
            print '%30s:  %5.0fms  %5.0fms  %6.2fus  %7.3fms' % \
                  (name,
                   min_time * MILLI_SECONDS,
                   avg_time * MILLI_SECONDS,
                   op_avg * MICRO_SECONDS,
                   min_overhead * MILLI_SECONDS)
        print '-' * LINE
        print ('Totals:                        '
               ' %6.0fms %6.0fms' %
               (total_min_time * MILLI_SECONDS,
                total_avg_time * MILLI_SECONDS,
                ))
        print

    def print_comparison(self, compare_to, hidenoise=0, limitnames=None):

        # Check benchmark versions
        if compare_to.version != self.version:
            print ('* Benchmark versions differ: '
                   'cannot compare this benchmark to "%s" !' %
                   compare_to.name)
            print
            self.print_benchmark(hidenoise=hidenoise,
                                 limitnames=limitnames)
            return

        # Print header
        compare_to.print_header('Comparing with')
        print ('Test                          '
               '   minimum run-time        average  run-time')
        print ('                              '
               '   this    other   diff    this    other   diff')
        print '-' * LINE

        # Print test comparisons
        tests = self.tests.items()
        tests.sort()
        total_min_time = other_total_min_time = 0.0
        total_avg_time = other_total_avg_time = 0.0
        benchmarks_compatible = self.compatible(compare_to)
        tests_compatible = 1
        for name, test in tests:
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            (min_time,
             avg_time,
             total_time,
             op_avg,
             min_overhead) = test.stat()
            total_min_time = total_min_time + min_time
            total_avg_time = total_avg_time + avg_time
            try:
                other = compare_to.tests[name]
            except KeyError:
                other = None
            if other is None:
                # Other benchmark doesn't include the given test
                min_diff, avg_diff = 'n/a', 'n/a'
                other_min_time = 0.0
                other_avg_time = 0.0
                tests_compatible = 0
            else:
                (other_min_time,
                 other_avg_time,
                 other_total_time,
                 other_op_avg,
                 other_min_overhead) = other.stat()
                other_total_min_time = other_total_min_time + other_min_time
                other_total_avg_time = other_total_avg_time + other_avg_time
                if (benchmarks_compatible and
                    test.compatible(other)):
                    # Both benchmark and tests are comparable
                    min_diff = ((min_time * self.warp) /
                                (other_min_time * other.warp) - 1.0)
                    avg_diff = ((avg_time * self.warp) /
                                (other_avg_time * other.warp) - 1.0)
                    # The diffs are still fractions at this point, so
                    # noise means anything below a 10% difference
                    if hidenoise and abs(min_diff) < 0.10:
                        min_diff = ''
                    else:
                        min_diff = '%+5.1f%%' % (min_diff * PERCENT)
                    if hidenoise and abs(avg_diff) < 0.10:
                        avg_diff = ''
                    else:
                        avg_diff = '%+5.1f%%' % (avg_diff * PERCENT)
                else:
                    # Benchmark or tests are not comparable
                    min_diff, avg_diff = 'n/a', 'n/a'
                    tests_compatible = 0
            print '%30s: %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' % \
                  (name,
                   min_time * MILLI_SECONDS,
                   other_min_time * MILLI_SECONDS * compare_to.warp / self.warp,
                   min_diff,
                   avg_time * MILLI_SECONDS,
                   other_avg_time * MILLI_SECONDS * compare_to.warp / self.warp,
                   avg_diff)
        print '-' * LINE

        # Summarise test results
        if not benchmarks_compatible or not tests_compatible:
            min_diff, avg_diff = 'n/a', 'n/a'
        else:
            if other_total_min_time != 0.0:
                min_diff = '%+5.1f%%' % (
                    ((total_min_time * self.warp) /
                     (other_total_min_time * compare_to.warp) - 1.0) * PERCENT)
            else:
                min_diff = 'n/a'
            if other_total_avg_time != 0.0:
                avg_diff = '%+5.1f%%' % (
                    ((total_avg_time * self.warp) /
                     (other_total_avg_time * compare_to.warp) - 1.0) * PERCENT)
            else:
                avg_diff = 'n/a'
        print ('Totals:                       '
               '  %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' %
               (total_min_time * MILLI_SECONDS,
                (other_total_min_time * compare_to.warp/self.warp
                 * MILLI_SECONDS),
                min_diff,
                total_avg_time * MILLI_SECONDS,
                (other_total_avg_time * compare_to.warp/self.warp
                 * MILLI_SECONDS),
                avg_diff
               ))
        print
        print '(this=%s, other=%s)' % (self.name,
                                       compare_to.name)
        print

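# Illustrative sketch only (not part of the original module): the
# Benchmark class can be driven programmatically in the same way
# main() below does it; the benchmark name and warp value here are
# made up.
#
#     bench = Benchmark('example-run', verbose=0,
#                       timer=TIMER_PLATFORM_DEFAULT,
#                       warp=10, calibration_runs=CALIBRATION_RUNS)
#     bench.load_tests(Setup)
#     bench.calibrate()
#     bench.run()
#     bench.print_header()
#     bench.print_benchmark()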
       
class PyBenchCmdline(Application):

    header = ("PYBENCH - a benchmark test suite for Python "
              "interpreters/compilers.")

    version = __version__

    debug = _debug

    options = [ArgumentOption('-n',
                              'number of rounds',
                              Setup.Number_of_rounds),
               ArgumentOption('-f',
                              'save benchmark to file arg',
                              ''),
               ArgumentOption('-c',
                              'compare benchmark with the one in file arg',
                              ''),
               ArgumentOption('-s',
                              'show benchmark in file arg, then exit',
                              ''),
               ArgumentOption('-w',
                              'set warp factor to arg',
                              Setup.Warp_factor),
               ArgumentOption('-t',
                              'run only tests with names matching arg',
                              ''),
               ArgumentOption('-C',
                              'set the number of calibration runs to arg',
                              CALIBRATION_RUNS),
               SwitchOption('-d',
                            'hide noise in comparisons',
                            0),
               SwitchOption('-v',
                            'verbose output (not recommended)',
                            0),
               SwitchOption('--with-gc',
                            'enable garbage collection',
                            0),
               SwitchOption('--with-syscheck',
                            'use default sys check interval',
                            0),
               ArgumentOption('--timer',
                              'use given timer',
                              TIMER_PLATFORM_DEFAULT),
               ]

    about = """\
The normal operation is to run the suite and display the
results. Use -f to save them for later reuse or comparisons.

Available timers:

   time.time
   time.clock
   systimes.processtime

Examples:

python2.1 pybench.py -f p21.pybench
python2.5 pybench.py -f p25.pybench
python pybench.py -s p25.pybench -c p21.pybench
"""
    copyright = __copyright__

    def main(self):

        rounds = self.values['-n']
        reportfile = self.values['-f']
        show_bench = self.values['-s']
        compare_to = self.values['-c']
        hidenoise = self.values['-d']
        warp = int(self.values['-w'])
        withgc = self.values['--with-gc']
        limitnames = self.values['-t']
        if limitnames:
            if _debug:
                print '* limiting test names to ones matching "%s"' % \
                      limitnames
            limitnames = re.compile(limitnames, re.I)
        else:
            limitnames = None
        verbose = self.verbose
        withsyscheck = self.values['--with-syscheck']
        calibration_runs = self.values['-C']
        timer = self.values['--timer']

        print '-' * LINE
        print 'PYBENCH %s' % __version__
        print '-' * LINE
        print '* using %s %s' % (
            getattr(platform, 'python_implementation', lambda:'Python')(),
            string.join(string.split(sys.version), ' '))

        # Switch off garbage collection
        if not withgc:
            try:
                import gc
            except ImportError:
                print '* Python version doesn\'t support garbage collection'
            else:
                try:
                    gc.disable()
                except NotImplementedError:
                    print '* Python version doesn\'t support gc.disable'
                else:
                    print '* disabled garbage collection'

        # "Disable" sys check interval
        if not withsyscheck:
            # Too bad the check interval uses an int instead of a long...
            value = 2147483647
            try:
                sys.setcheckinterval(value)
            except (AttributeError, NotImplementedError):
                print '* Python version doesn\'t support sys.setcheckinterval'
            else:
                print '* system check interval set to maximum: %s' % value

        if timer == TIMER_SYSTIMES_PROCESSTIME:
            import systimes
            print '* using timer: systimes.processtime (%s)' % \
                  systimes.SYSTIMES_IMPLEMENTATION
        else:
            print '* using timer: %s' % timer

        print

        if compare_to:
            try:
                f = open(compare_to, 'rb')
                bench = pickle.load(f)
                bench.name = compare_to
                f.close()
                compare_to = bench
            except IOError, reason:
                print '* Error opening/reading file %s: %s' % (
                    repr(compare_to),
                    reason)
                compare_to = None

        if show_bench:
            try:
                f = open(show_bench, 'rb')
                bench = pickle.load(f)
                bench.name = show_bench
                f.close()
                bench.print_header()
                if compare_to:
                    bench.print_comparison(compare_to,
                                           hidenoise=hidenoise,
                                           limitnames=limitnames)
                else:
                    bench.print_benchmark(hidenoise=hidenoise,
                                          limitnames=limitnames)
            except IOError, reason:
                print '* Error opening/reading file %s: %s' % (
                    repr(show_bench),
                    reason)
                print
            return

        if reportfile:
            print 'Creating benchmark: %s (rounds=%i, warp=%i)' % \
                  (reportfile, rounds, warp)
            print

        # Create benchmark object
        bench = Benchmark(reportfile,
                          verbose=verbose,
                          timer=timer,
                          warp=warp,
                          calibration_runs=calibration_runs)
        bench.rounds = rounds
        bench.load_tests(Setup, limitnames=limitnames)
        try:
            bench.calibrate()
            bench.run()
        except KeyboardInterrupt:
            print
            print '*** KeyboardInterrupt -- Aborting'
            print
            return
        bench.print_header()
        if compare_to:
            bench.print_comparison(compare_to,
                                   hidenoise=hidenoise,
                                   limitnames=limitnames)
        else:
            bench.print_benchmark(hidenoise=hidenoise,
                                  limitnames=limitnames)

        # Ring bell
        sys.stderr.write('\007')

        if reportfile:
            try:
                f = open(reportfile, 'wb')
                bench.name = reportfile
                pickle.dump(bench, f)
                f.close()
            except IOError, reason:
                print '* Error opening/writing reportfile %s: %s' % (
                    reportfile,
                    reason)
                print

if __name__ == '__main__':
    PyBenchCmdline()
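
# Illustrative sketch only: a benchmark saved with -f is just a pickled
# Benchmark instance, so it can also be inspected outside of pybench
# (the filename is taken from the examples in the about text above;
# pybench.py and Setup.py must be importable for unpickling to work).
#
#     import pickle
#     f = open('p25.pybench', 'rb')
#     bench = pickle.load(f)
#     f.close()
#     bench.print_header()
#     bench.print_benchmark()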