author     Hubert Tong <hubert.reinterpretcast@gmail.com>   2019-09-13 20:17:59 +0000
committer  Hubert Tong <hubert.reinterpretcast@gmail.com>   2019-09-13 20:17:59 +0000
commit     e17846c286a0c9de968ea8593deb0ab2ecfd939a (patch)
tree       283f62a626c8daa833872365d467355a06824977
parent     b17dc31ef56dcb3a822f886142f764b0c6689698 (diff)
[LNT] Python 3 support: print statements
Summary:
This patch applies `2to3 -f print` fixes, corrects the indentation mangled by
`2to3` for multiline print statements, and adds
`from __future__ import print_function` to each file that was modified.

As requested on review, spaces are then added after commas separating arguments
to `print`, separating function call arguments or tuple elements within
arguments to `print`, etc.

The changes cover the files found to be affected when running tests (without
result submission).

Reviewers: cmatthews, thopre, kristof.beyls

Reviewed By: cmatthews

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D67532

git-svn-id: https://llvm.org/svn/llvm-project/lnt/trunk@371891 91177308-0d34-0410-b5e6-96231b3b80d8
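For reference, the `2to3 -f print` fixer maps each Python 2 print-statement form onto the `print()` function, and the `__future__` import makes the function form available on Python 2 as well. The snippet below is an illustrative summary of the conversions seen throughout this diff (not part of the commit itself); each legacy form is shown as a comment above its converted equivalent.

```python
from __future__ import print_function  # enables the print() function on Python 2

import sys

# print                    -> a bare statement becomes an empty call (newline only)
print()

# print x, y               -> the expression list becomes function arguments
print('x', 'y')

# print x,                 -> a trailing comma (suppressed newline) becomes end=' '
print('no newline here', end=' ')

# print >>sys.stderr, x    -> stream redirection becomes the file= keyword
print('to stderr', file=sys.stderr)
```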
-rw-r--r--  lnt/external/stats/pstat.py    13
-rw-r--r--  lnt/external/stats/stats.py   105
-rw-r--r--  lnt/lnttool/create.py          33
-rw-r--r--  lnt/lnttool/main.py            23
-rw-r--r--  lnt/server/ui/app.py            9
-rw-r--r--  lnt/tests/builtintest.py        7
-rw-r--r--  lnt/tests/compile.py           11
-rw-r--r--  lnt/tests/nt.py               111
-rw-r--r--  lnt/tests/test_suite.py         3
-rw-r--r--  lnt/util/ImportData.py         79
-rw-r--r--  lnt/util/ServerUtil.py         15
-rw-r--r--  lnt/util/wsgi_restart.py        7
12 files changed, 214 insertions, 202 deletions
diff --git a/lnt/external/stats/pstat.py b/lnt/external/stats/pstat.py
index 90f5696..68650be 100644
--- a/lnt/external/stats/pstat.py
+++ b/lnt/external/stats/pstat.py
@@ -105,6 +105,7 @@ functions/methods. Their inclusion here is for function name consistency.
##
## 11/08/98 ... fixed aput to output large arrays correctly
+from __future__ import print_function
import stats # required 3rd party module
import string, copy
from types import *
@@ -514,14 +515,14 @@ Returns: None
maxsize[col] = max(map(len,items)) + extra
for row in lst:
if row == ['\n'] or row == '\n' or row == '' or row == ['']:
- print
+ print()
elif row == ['dashes'] or row == 'dashes':
dashes = [0]*len(maxsize)
for j in range(len(maxsize)):
dashes[j] = '-'*(maxsize[j]-2)
- print lineincustcols(dashes,maxsize)
+ print(lineincustcols(dashes, maxsize))
else:
- print lineincustcols(row,maxsize)
+ print(lineincustcols(row, maxsize))
return None
@@ -534,7 +535,7 @@ Usage: printincols (listoflists,colsize)
Returns: None
"""
for row in listoflists:
- print lineincols(row,colsize)
+ print(lineincols(row, colsize))
return None
@@ -547,9 +548,9 @@ Returns: None
"""
for row in listoflists:
if row[-1] == '\n':
- print row,
+ print(row, end=' ')
else:
- print row
+ print(row)
return None
diff --git a/lnt/external/stats/stats.py b/lnt/external/stats/stats.py
index 3cb2fef..ab196fd 100644
--- a/lnt/external/stats/stats.py
+++ b/lnt/external/stats/stats.py
@@ -48,8 +48,8 @@ This is a handy way to keep consistent function names when different
argument types require different functions to be called. Having
implementated the Dispatch class, however, means that to get info on
a given function, you must use the REAL function name ... that is
-"print stats.lmean.__doc__" or "print stats.amean.__doc__" work fine,
-while "print stats.mean.__doc__" will print the doc for the Dispatch
+"print(stats.lmean.__doc__)" or "print(stats.amean.__doc__)" work fine,
+while "print(stats.mean.__doc__)" will print the doc for the Dispatch
class. NUMPY FUNCTIONS ('a' prefix) generally have more argument options
but should otherwise be consistent with the corresponding list functions.
@@ -222,6 +222,7 @@ SUPPORT FUNCTIONS: writecc
## changed name of skewness and askewness to skew and askew
## fixed (a)histogram (which sometimes counted points <lowerlimit)
+from __future__ import print_function
import pstat # required 3rd party module
import math, string, copy # required python modules
from types import *
@@ -473,7 +474,7 @@ given by inlist.
Usage: lscoreatpercentile(inlist,percent)
"""
if percent > 1:
- print "\nDividing percent>1 by 100 in lscoreatpercentile().\n"
+ print("\nDividing percent>1 by 100 in lscoreatpercentile().\n")
percent = percent / 100.0
targetcf = percent*len(inlist)
h, lrl, binsize, extras = histogram(inlist)
@@ -535,7 +536,7 @@ Returns: list of bin values, lowerreallimit, binsize, extrapoints
except:
extrapoints = extrapoints + 1
if (extrapoints > 0 and printextras == 1):
- print '\nPoints outside given histogram range =',extrapoints
+ print('\nPoints outside given histogram range =', extrapoints)
return (bins, lowerreallimit, binsize, extrapoints)
@@ -777,11 +778,11 @@ Returns: appropriate statistic name, value, and probability
"""
samples = ''
while samples not in ['i','r','I','R','c','C']:
- print '\nIndependent or related samples, or correlation (i,r,c): ',
+ print('\nIndependent or related samples, or correlation (i,r,c): ', end=' ')
samples = raw_input()
if samples in ['i','I','r','R']:
- print '\nComparing variances ...',
+ print('\nComparing variances ...', end=' ')
# USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
r = obrientransform(x,y)
f,p = F_oneway(pstat.colex(r,0),pstat.colex(r,1))
@@ -789,45 +790,45 @@ Returns: appropriate statistic name, value, and probability
vartype='unequal, p='+str(round(p,4))
else:
vartype='equal'
- print vartype
+ print(vartype)
if samples in ['i','I']:
if vartype[0]=='e':
t,p = ttest_ind(x,y,0)
- print '\nIndependent samples t-test: ', round(t,4),round(p,4)
+ print('\nIndependent samples t-test: ', round(t, 4), round(p, 4))
else:
if len(x)>20 or len(y)>20:
z,p = ranksums(x,y)
- print '\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4)
+ print('\nRank Sums test (NONparametric, n>20): ', round(z, 4), round(p, 4))
else:
u,p = mannwhitneyu(x,y)
- print '\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4)
+ print('\nMann-Whitney U-test (NONparametric, ns<20): ', round(u, 4), round(p, 4))
else: # RELATED SAMPLES
if vartype[0]=='e':
t,p = ttest_rel(x,y,0)
- print '\nRelated samples t-test: ', round(t,4),round(p,4)
+ print('\nRelated samples t-test: ', round(t, 4), round(p, 4))
else:
t,p = ranksums(x,y)
- print '\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4)
+ print('\nWilcoxon T-test (NONparametric): ', round(t, 4), round(p, 4))
else: # CORRELATION ANALYSIS
corrtype = ''
while corrtype not in ['c','C','r','R','d','D']:
- print '\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',
+ print('\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ', end=' ')
corrtype = raw_input()
if corrtype in ['c','C']:
m,b,r,p,see = linregress(x,y)
- print '\nLinear regression for continuous variables ...'
+ print('\nLinear regression for continuous variables ...')
lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
pstat.printcc(lol)
elif corrtype in ['r','R']:
r,p = spearmanr(x,y)
- print '\nCorrelation for ranked variables ...'
- print "Spearman's r: ",round(r,4),round(p,4)
+ print('\nCorrelation for ranked variables ...')
+ print("Spearman's r: ", round(r, 4), round(p, 4))
else: # DICHOTOMOUS
r,p = pointbiserialr(x,y)
- print '\nAssuming x contains a dichotomous variable ...'
- print 'Point Biserial r: ',round(r,4),round(p,4)
- print '\n\n'
+ print('\nAssuming x contains a dichotomous variable ...')
+ print('Point Biserial r: ', round(r, 4), round(p, 4))
+ print('\n\n')
return None
@@ -1501,7 +1502,7 @@ Usage: lbetacf(a,b,x)
bz = 1.0
if (abs(az-aold)<(EPS*abs(az))):
return az
- print 'a or b too big, or ITMAX too small in Betacf.'
+ print('a or b too big, or ITMAX too small in Betacf.')
def lgammln(xx):
@@ -1821,11 +1822,11 @@ Returns: None
lofl = title+[[name1,n1,round(m1,3),round(math.sqrt(se1),3),min1,max1],
[name2,n2,round(m2,3),round(math.sqrt(se2),3),min2,max2]]
if type(fname)<>StringType or len(fname)==0:
- print
- print statname
- print
+ print()
+ print(statname)
+ print()
pstat.printcc(lofl)
- print
+ print()
try:
if stat.shape == ():
stat = stat[0]
@@ -1833,8 +1834,8 @@ Returns: None
prob = prob[0]
except:
pass
- print 'Test statistic = ',round(stat,3),' p = ',round(prob,3),suffix
- print
+ print('Test statistic = ', round(stat, 3), ' p = ', round(prob, 3), suffix)
+ print()
else:
file = open(fname,writemode)
file.write('\n'+statname+'\n\n')
@@ -2417,7 +2418,7 @@ Returns: skew of vals in a along dimension, returning ZERO where all vals equal
denom = N.power(amoment(a,2,dimension),1.5)
zero = N.equal(denom,0)
if type(denom) == N.ndarray and asum(zero) <> 0:
- print "Number of zeros in askew: ",asum(zero)
+ print("Number of zeros in askew: ", asum(zero))
denom = denom + zero # prevent divide-by-zero
return N.where(zero, 0, amoment(a,3,dimension)/denom)
@@ -2436,7 +2437,7 @@ Returns: kurtosis of values in a along dimension, and ZERO where all vals equal
denom = N.power(amoment(a,2,dimension),2)
zero = N.equal(denom,0)
if type(denom) == N.ndarray and asum(zero) <> 0:
- print "Number of zeros in akurtosis: ",asum(zero)
+ print("Number of zeros in akurtosis: ", asum(zero))
denom = denom + zero # prevent divide-by-zero
return N.where(zero,0,amoment(a,4,dimension)/denom)
@@ -2506,7 +2507,7 @@ Returns: z-score and 2-tail z-probability, returns 0 for bad pixels
dimension = 0
n = float(a.shape[dimension])
if n<20:
- print "akurtosistest only valid for n>=20 ... continuing anyway, n=",n
+ print("akurtosistest only valid for n>=20 ... continuing anyway, n=", n)
b2 = akurtosis(a,dimension)
E = 3.0*(n-1) /(n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5))
@@ -2629,7 +2630,7 @@ Returns: (array of bin counts, bin-minimum, min-width, #-points-outside-range)
except: # point outside lower/upper limits
extrapoints = extrapoints + 1
if (extrapoints > 0 and printextras == 1):
- print '\nPoints outside given histogram range =',extrapoints
+ print('\nPoints outside given histogram range =', extrapoints)
return (bins, lowerreallimit, binsize, extrapoints)
@@ -3001,11 +3002,11 @@ Returns: appropriate statistic name, value, and probability
"""
samples = ''
while samples not in ['i','r','I','R','c','C']:
- print '\nIndependent or related samples, or correlation (i,r,c): ',
+ print('\nIndependent or related samples, or correlation (i,r,c): ', end=' ')
samples = raw_input()
if samples in ['i','I','r','R']:
- print '\nComparing variances ...',
+ print('\nComparing variances ...', end=' ')
# USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
r = obrientransform(x,y)
f,p = F_oneway(pstat.colex(r,0),pstat.colex(r,1))
@@ -3013,45 +3014,45 @@ Returns: appropriate statistic name, value, and probability
vartype='unequal, p='+str(round(p,4))
else:
vartype='equal'
- print vartype
+ print(vartype)
if samples in ['i','I']:
if vartype[0]=='e':
t,p = ttest_ind(x,y,None,0)
- print '\nIndependent samples t-test: ', round(t,4),round(p,4)
+ print('\nIndependent samples t-test: ', round(t, 4), round(p, 4))
else:
if len(x)>20 or len(y)>20:
z,p = ranksums(x,y)
- print '\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4)
+ print('\nRank Sums test (NONparametric, n>20): ', round(z, 4), round(p, 4))
else:
u,p = mannwhitneyu(x,y)
- print '\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4)
+ print('\nMann-Whitney U-test (NONparametric, ns<20): ', round(u, 4), round(p, 4))
else: # RELATED SAMPLES
if vartype[0]=='e':
t,p = ttest_rel(x,y,0)
- print '\nRelated samples t-test: ', round(t,4),round(p,4)
+ print('\nRelated samples t-test: ', round(t, 4), round(p, 4))
else:
t,p = ranksums(x,y)
- print '\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4)
+ print('\nWilcoxon T-test (NONparametric): ', round(t, 4), round(p, 4))
else: # CORRELATION ANALYSIS
corrtype = ''
while corrtype not in ['c','C','r','R','d','D']:
- print '\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',
+ print('\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ', end=' ')
corrtype = raw_input()
if corrtype in ['c','C']:
m,b,r,p,see = linregress(x,y)
- print '\nLinear regression for continuous variables ...'
+ print('\nLinear regression for continuous variables ...')
lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
pstat.printcc(lol)
elif corrtype in ['r','R']:
r,p = spearmanr(x,y)
- print '\nCorrelation for ranked variables ...'
- print "Spearman's r: ",round(r,4),round(p,4)
+ print('\nCorrelation for ranked variables ...')
+ print("Spearman's r: ", round(r, 4), round(p, 4))
else: # DICHOTOMOUS
r,p = pointbiserialr(x,y)
- print '\nAssuming x contains a dichotomous variable ...'
- print 'Point Biserial r: ',round(r,4),round(p,4)
- print '\n\n'
+ print('\nAssuming x contains a dichotomous variable ...')
+ print('Point Biserial r: ', round(r, 4), round(p, 4))
+ print('\n\n')
return None
@@ -3284,7 +3285,7 @@ Returns: slope, intercept, r, two-tailed prob, sterr-of-the-estimate, n
shp = N.ones(len(y.shape))
shp[0] = len(x)
x.shape = shp
- print x.shape, y.shape
+ print(x.shape, y.shape)
r_num = n*(N.add.reduce(x*y,0)) - N.add.reduce(x)*N.add.reduce(y,0)
r_den = N.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y,0)-asquare_of_sums(y,0)))
zerodivproblem = N.equal(r_den,0)
@@ -3404,15 +3405,15 @@ Returns: an array of t-values with the shape of pval
pval = abs(pval)
t = N.ones(pval.shape,N.float_)*50
step = N.ones(pval.shape,N.float_)*25
- print "Initial ap2t() prob calc"
+ print("Initial ap2t() prob calc")
prob = abetai(0.5*df,0.5,float(df)/(df+t*t))
- print 'ap2t() iter: ',
+ print('ap2t() iter: ', end=' ')
for i in range(10):
- print i,' ',
+ print(i, ' ', end=' ')
t = N.where(pval<prob,t+step,t-step)
prob = abetai(0.5*df,0.5,float(df)/(df+t*t))
step = step/2
- print
+ print()
# since this is an ugly hack, we get ugly boundaries
t = N.where(t>99.9,1000,t) # hit upper-boundary
t = t+signs
@@ -3935,7 +3936,7 @@ Usage: abetacf(a,b,x,verbose=1)
mask = N.clip(mask+newmask,0,1)
noconverge = asum(N.equal(frozen,-1))
if noconverge <> 0 and verbose:
- print 'a or b too big, or ITMAX too small in Betacf for ',noconverge,' elements'
+ print('a or b too big, or ITMAX too small in Betacf for ', noconverge, ' elements')
if arrayflag:
return frozen
else:
@@ -4020,7 +4021,7 @@ Usage: aglm(data,para)
Returns: statistic, p-value ???
"""
if len(para) <> len(data):
- print "data and para must be same length in aglm"
+ print("data and para must be same length in aglm")
return
n = len(para)
p = pstat.aunique(para)
diff --git a/lnt/lnttool/create.py b/lnt/lnttool/create.py
index eaf90a1..90b1af7 100644
--- a/lnt/lnttool/create.py
+++ b/lnt/lnttool/create.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
import click
import platform
@@ -164,19 +165,19 @@ LNT configuration.
# Execute an upgrade on the database to initialize the schema.
lnt.server.db.migrate.update_path(db_path)
- print 'created LNT configuration in %r' % basepath
- print ' configuration file: %s' % cfg_path
- print ' WSGI app : %s' % wsgi_path
- print ' database file : %s' % db_path
- print ' temporary dir : %s' % tmp_path
- print ' host URL : %s' % hosturl
- print
- print 'You can execute:'
- print ' %s' % wsgi_path
- print 'to test your installation with the builtin server.'
- print
- print 'For production use configure this application to run with any'
- print 'WSGI capable web server. You may need to modify the permissions'
- print 'on the database and temporary file directory to allow writing'
- print 'by the web app.'
- print
+ print('created LNT configuration in %r' % basepath)
+ print(' configuration file: %s' % cfg_path)
+ print(' WSGI app : %s' % wsgi_path)
+ print(' database file : %s' % db_path)
+ print(' temporary dir : %s' % tmp_path)
+ print(' host URL : %s' % hosturl)
+ print()
+ print('You can execute:')
+ print(' %s' % wsgi_path)
+ print('to test your installation with the builtin server.')
+ print()
+ print('For production use configure this application to run with any')
+ print('WSGI capable web server. You may need to modify the permissions')
+ print('on the database and temporary file directory to allow writing')
+ print('by the web app.')
+ print()
diff --git a/lnt/lnttool/main.py b/lnt/lnttool/main.py
index dec384e..050fb58 100644
--- a/lnt/lnttool/main.py
+++ b/lnt/lnttool/main.py
@@ -1,4 +1,5 @@
"""Implement the command line 'lnt' tool."""
+from __future__ import print_function
from .common import init_logger
from .common import submit_options
from .convert import action_convert
@@ -134,11 +135,11 @@ def _print_result_url(results, verbose):
result_url = results.get('result_url')
if result_url is not None:
if verbose:
- print "Results available at:", result_url
+ print("Results available at:", result_url)
else:
- print result_url
+ print(result_url)
elif verbose:
- print "Results available at: no URL available"
+ print("Results available at: no URL available")
class RunTestCLI(click.MultiCommand):
@@ -167,13 +168,13 @@ def action_showtests():
import lnt.tests
import inspect
- print 'Available tests:'
+ print('Available tests:')
test_names = lnt.tests.get_names()
max_name = max(map(len, test_names))
for name in test_names:
test_module = lnt.tests.get_module(name)
description = inspect.cleandoc(test_module.__doc__)
- print ' %-*s - %s' % (max_name, name, description)
+ print(' %-*s - %s' % (max_name, name, description))
@click.command("submit")
@@ -409,7 +410,7 @@ def command_update(input, output):
def command_get_version(input):
"""print the version of a profile"""
import lnt.testing.profile.profile as profile
- print profile.Profile.fromFile(input).getVersion()
+ print(profile.Profile.fromFile(input).getVersion())
@action_profile.command("getTopLevelCounters")
@@ -418,7 +419,7 @@ def command_top_level_counters(input):
"""print the whole-profile counter values"""
import json
import lnt.testing.profile.profile as profile
- print json.dumps(profile.Profile.fromFile(input).getTopLevelCounters())
+ print(json.dumps(profile.Profile.fromFile(input).getTopLevelCounters()))
@action_profile.command("getFunctions")
@@ -427,7 +428,7 @@ def command_get_functions(input):
"""print the functions in a profile"""
import json
import lnt.testing.profile.profile as profile
- print json.dumps(profile.Profile.fromFile(input).getFunctions())
+ print(json.dumps(profile.Profile.fromFile(input).getFunctions()))
@action_profile.command("getCodeForFunction")
@@ -437,8 +438,8 @@ def command_code_for_function(input, fn):
"""print the code/instruction for a function"""
import json
import lnt.testing.profile.profile as profile
- print json.dumps(
- list(profile.Profile.fromFile(input).getCodeForFunction(fn)))
+ print(json.dumps(
+ list(profile.Profile.fromFile(input).getCodeForFunction(fn))))
def _version_check():
@@ -472,7 +473,7 @@ def show_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
if lnt.__version__:
- print "LNT %s" % (lnt.__version__,)
+ print("LNT %s" % (lnt.__version__, ))
ctx.exit()
diff --git a/lnt/server/ui/app.py b/lnt/server/ui/app.py
index 5cffe42..98f06c6 100644
--- a/lnt/server/ui/app.py
+++ b/lnt/server/ui/app.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
import StringIO
import logging
import logging.handlers
@@ -262,12 +263,12 @@ class App(LNTExceptionLoggerFlask):
rotating.setLevel(logging.DEBUG)
self.logger.addHandler(rotating)
except (OSError, IOError) as e:
- print >> sys.stderr, "Error making log file", \
- LOG_FILENAME, str(e)
- print >> sys.stderr, "Will not log to file."
+ print("Error making log file", \
+ LOG_FILENAME, str(e), file=sys.stderr)
+ print("Will not log to file.", file=sys.stderr)
else:
self.logger.info("Started file logging.")
- print "Logging to :", LOG_FILENAME
+ print("Logging to :", LOG_FILENAME)
else:
self.config['log_file_name'] = log_file_name
diff --git a/lnt/tests/builtintest.py b/lnt/tests/builtintest.py
index 34a3f01..f0a1f59 100644
--- a/lnt/tests/builtintest.py
+++ b/lnt/tests/builtintest.py
@@ -2,6 +2,7 @@
Base class for builtin-in tests.
"""
+from __future__ import print_function
import sys
import os
@@ -43,7 +44,7 @@ class BuiltinTest(object):
def log(self, message, ts=None):
if not ts:
ts = timestamp()
- print >>sys.stderr, '%s: %s' % (ts, message)
+ print('%s: %s' % (ts, message), file=sys.stderr)
@staticmethod
def print_report(report, output):
@@ -52,7 +53,7 @@ class BuiltinTest(object):
output_stream = sys.stdout
else:
output_stream = open(output, 'w')
- print >> output_stream, report.render()
+ print(report.render(), file=output_stream)
if output_stream is not sys.stdout:
output_stream.close()
@@ -87,4 +88,4 @@ class BuiltinTest(object):
"""Print the result URL"""
result_url = server_results.get('result_url', None)
if result_url is not None:
- print "Results available at:", server_results['result_url']
+ print("Results available at:", server_results['result_url'])
diff --git a/lnt/tests/compile.py b/lnt/tests/compile.py
index 62e343e..eeca8fe 100644
--- a/lnt/tests/compile.py
+++ b/lnt/tests/compile.py
@@ -1,4 +1,5 @@
"""Single file compile-time performance testing"""
+from __future__ import print_function
import errno
import hashlib
import json
@@ -759,8 +760,8 @@ class CompileTest(builtintest.BuiltinTest):
# Set up the sandbox.
global g_output_dir
if not os.path.exists(opts.sandbox_path):
- print >>sys.stderr, "%s: creating sandbox: %r" % (
- timestamp(), opts.sandbox_path)
+ print("%s: creating sandbox: %r" % (
+ timestamp(), opts.sandbox_path), file=sys.stderr)
os.mkdir(opts.sandbox_path)
if opts.timestamp_build:
fmt_timestamp = timestamp().replace(' ', '_').replace(':', '-')
@@ -888,10 +889,10 @@ class CompileTest(builtintest.BuiltinTest):
# Show the tests, if requested.
if opts.show_tests:
- print >>sys.stderr, 'Available Tests'
+ print('Available Tests', file=sys.stderr)
for name in sorted(set(name for name, _ in all_tests)):
- print >>sys.stderr, ' %s' % (name,)
- print
+ print(' %s' % (name, ), file=sys.stderr)
+ print()
raise SystemExit
# Find the tests to run.
diff --git a/lnt/tests/nt.py b/lnt/tests/nt.py
index 4ce6647..20e046e 100644
--- a/lnt/tests/nt.py
+++ b/lnt/tests/nt.py
@@ -1,4 +1,5 @@
"""LLVM test-suite compile and execution tests"""
+from __future__ import print_function
import csv
import os
import platform
@@ -64,8 +65,8 @@ class TestModule(object):
cmdstr = ' '.join(args)
if 'cwd' in kwargs:
- print >>self._log, "# In working dir: " + kwargs['cwd']
- print >>self.log, cmdstr
+ print("# In working dir: " + kwargs['cwd'], file=self._log)
+ print(cmdstr, file=self.log)
self._log.flush()
p = subprocess.Popen(args, stdout=self._log, stderr=self._log,
@@ -530,7 +531,7 @@ def execute_test_modules(test_log, test_modules, test_module_variables,
# parallel build options to the test.
test_modules.sort()
- print >>sys.stderr, '%s: executing test modules' % (timestamp(),)
+ print('%s: executing test modules' % (timestamp(), ), file=sys.stderr)
results = []
for name in test_modules:
# First, load the test module file.
@@ -655,10 +656,10 @@ def execute_nt_tests(test_log, make_variables, basedir, config):
if config.use_isolation:
# Write out the sandbox profile.
sandbox_profile_path = os.path.join(basedir, "isolation.sb")
- print >>sys.stderr, "%s: creating sandbox profile %r" % (
- timestamp(), sandbox_profile_path)
+ print("%s: creating sandbox profile %r" % (
+ timestamp(), sandbox_profile_path), file=sys.stderr)
with open(sandbox_profile_path, 'w') as f:
- print >>f, """
+ print("""
;; Sandbox profile for isolation test access.
(version 1)
@@ -678,44 +679,44 @@ def execute_nt_tests(test_log, make_variables, basedir, config):
(regex #"^/private/tmp/")
(regex #"^/private/var/folders/")
(regex #"^/dev/")
- (regex #"^%s"))""" % (basedir,)
+ (regex #"^%s"))""" % (basedir, ), file=f)
common_args = ['sandbox-exec', '-f', sandbox_profile_path] +\
common_args
# Run a separate 'make build' step if --build-threads was given.
if config.build_threads > 0:
args = common_args + ['-j', str(config.build_threads), 'build']
- print >>test_log, '%s: running: %s' % (timestamp(),
- ' '.join('"%s"' % a
- for a in args))
+ print('%s: running: %s' % (timestamp(),
+ ' '.join('"%s"' % a
+ for a in args)), file=test_log)
test_log.flush()
- print >>sys.stderr, '%s: building "nightly tests" with -j%u...' % (
- timestamp(), config.build_threads)
+ print('%s: building "nightly tests" with -j%u...' % (
+ timestamp(), config.build_threads), file=sys.stderr)
res = execute_command(test_log, basedir, args, report_dir)
if res != 0:
- print >> sys.stderr, "Failure while running make build! " \
- "See log: %s" % test_log.name
+ print("Failure while running make build! " \
+ "See log: %s" % test_log.name, file=sys.stderr)
# Then 'make report'.
args = common_args + ['-j', str(config.threads),
'report', 'report.%s.csv' % config.test_style]
- print >>test_log, '%s: running: %s' % (timestamp(),
- ' '.join('"%s"' % a
- for a in args))
+ print('%s: running: %s' % (timestamp(),
+ ' '.join('"%s"' % a
+ for a in args)), file=test_log)
test_log.flush()
# FIXME: We shouldn't need to set env=os.environ here, but if we don't
# somehow MACOSX_DEPLOYMENT_TARGET gets injected into the environment on OS
# X (which changes the driver behavior and causes generally weirdness).
- print >>sys.stderr, '%s: executing "nightly tests" with -j%u...' % (
- timestamp(), config.threads)
+ print('%s: executing "nightly tests" with -j%u...' % (
+ timestamp(), config.threads), file=sys.stderr)
res = execute_command(test_log, basedir, args, report_dir)
if res != 0:
- print >> sys.stderr, "Failure while running nightly tests! "\
- "See log: %s" % test_log.name
+ print("Failure while running nightly tests! "\
+ "See log: %s" % test_log.name, file=sys.stderr)
# Keep a mapping of mangled test names, to the original names in the
@@ -867,10 +868,10 @@ def load_nt_report_file(report_path, config):
def prepare_report_dir(config):
# Set up the sandbox.
sandbox_path = config.sandbox_path
- print sandbox_path
+ print(sandbox_path)
if not os.path.exists(sandbox_path):
- print >>sys.stderr, "%s: creating sandbox: %r" % (
- timestamp(), sandbox_path)
+ print("%s: creating sandbox: %r" % (
+ timestamp(), sandbox_path), file=sys.stderr)
os.mkdir(sandbox_path)
# Create the per-test directory.
@@ -908,15 +909,15 @@ def prepare_build_dir(config, iteration):
def update_tools(make_variables, config, iteration):
"""Update the test suite tools. """
- print >>sys.stderr, '%s: building test-suite tools' % (timestamp(),)
+ print('%s: building test-suite tools' % (timestamp(), ), file=sys.stderr)
args = ['make', 'tools']
args.extend('%s=%s' % (k, v) for k, v in make_variables.items())
build_tools_log_path = os.path.join(config.build_dir(iteration),
'build-tools.log')
build_tools_log = open(build_tools_log_path, 'w')
- print >>build_tools_log, '%s: running: %s' % (timestamp(),
- ' '.join('"%s"' % a
- for a in args))
+ print('%s: running: %s' % (timestamp(),
+ ' '.join('"%s"' % a
+ for a in args)), file=build_tools_log)
build_tools_log.flush()
res = execute_command(build_tools_log, config.build_dir(iteration),
args, config.report_dir)
@@ -947,12 +948,12 @@ def configure_test_suite(config, iteration):
args.extend(['--target=%s' % config.target])
- print >>configure_log, '%s: running: %s' % (timestamp(),
- ' '.join('"%s"' % a
- for a in args))
+ print('%s: running: %s' % (timestamp(),
+ ' '.join('"%s"' % a
+ for a in args)), file=configure_log)
configure_log.flush()
- print >>sys.stderr, '%s: configuring...' % timestamp()
+ print('%s: configuring...' % timestamp(), file=sys.stderr)
res = execute_command(configure_log, basedir, args, config.report_dir)
configure_log.close()
if res != 0:
@@ -968,15 +969,15 @@ def copy_missing_makefiles(config, basedir):
obj_path = os.path.join(basedir, suffix)
src_path = os.path.join(config.test_suite_root, suffix)
if not os.path.exists(obj_path):
- print '%s: initializing test dir %s' % (timestamp(), suffix)
+ print('%s: initializing test dir %s' % (timestamp(), suffix))
os.mkdir(obj_path)
shutil.copyfile(os.path.join(src_path, 'Makefile'),
os.path.join(obj_path, 'Makefile'))
def run_test(nick_prefix, iteration, config):
- print >>sys.stderr, "%s: checking source versions" % (
- timestamp(),)
+ print("%s: checking source versions" % (
+ timestamp(), ), file=sys.stderr)
test_suite_source_version = get_source_version(config.test_suite_root)
@@ -989,11 +990,11 @@ def run_test(nick_prefix, iteration, config):
config)
# Scan for LNT-based test modules.
- print >>sys.stderr, "%s: scanning for LNT-based test modules" % (
- timestamp(),)
+ print("%s: scanning for LNT-based test modules" % (
+ timestamp(), ), file=sys.stderr)
test_modules = list(scan_for_test_modules(config))
- print >>sys.stderr, "%s: found %d LNT-based test modules" % (
- timestamp(), len(test_modules))
+ print("%s: found %d LNT-based test modules" % (
+ timestamp(), len(test_modules)), file=sys.stderr)
nick = nick_prefix
if config.auto_name:
@@ -1001,7 +1002,7 @@ def run_test(nick_prefix, iteration, config):
cc_info = config.cc_info
cc_nick = '%s_%s' % (cc_info.get('cc_name'), cc_info.get('cc_build'))
nick += "__%s__%s" % (cc_nick, cc_info.get('cc_target').split('-')[0])
- print >>sys.stderr, "%s: using nickname: %r" % (timestamp(), nick)
+ print("%s: using nickname: %r" % (timestamp(), nick), file=sys.stderr)
basedir = prepare_build_dir(config, iteration)
@@ -1009,7 +1010,7 @@ def run_test(nick_prefix, iteration, config):
# cause make horrible fits).
start_time = timestamp()
- print >>sys.stderr, '%s: starting test in %r' % (start_time, basedir)
+ print('%s: starting test in %r' % (start_time, basedir), file=sys.stderr)
# Configure the test suite.
if config.run_configure or not os.path.exists(os.path.join(
@@ -1054,9 +1055,9 @@ def run_test(nick_prefix, iteration, config):
else:
test_namespace = 'nightlytest'
if run_nightly_test:
- print >>sys.stderr, '%s: loading nightly test data...' % timestamp()
+ print('%s: loading nightly test data...' % timestamp(), file=sys.stderr)
# If nightly test went screwy, it won't have produced a report.
- print build_report_path
+ print(build_report_path)
if not os.path.exists(build_report_path):
fatal('nightly test failed, no report generated')
@@ -1076,7 +1077,7 @@ def run_test(nick_prefix, iteration, config):
existing_tests.add(s.name)
test_samples.extend(results)
- print >>sys.stderr, '%s: capturing machine information' % (timestamp(),)
+ print('%s: capturing machine information' % (timestamp(), ), file=sys.stderr)
# Collect the machine and run info.
#
# FIXME: Import full range of data that the Clang tests are using?
@@ -1154,19 +1155,19 @@ def run_test(nick_prefix, iteration, config):
if name in target:
logger.warning("parameter %r overwrote existing value: %r" %
(name, target.get(name)))
- print target, name, value
+ print(target, name, value)
target[name] = value
# Generate the test report.
lnt_report_path = config.report_path(iteration)
- print >>sys.stderr, '%s: generating report: %r' % (timestamp(),
- lnt_report_path)
+ print('%s: generating report: %r' % (timestamp(),
+ lnt_report_path), file=sys.stderr)
machine = lnt.testing.Machine(nick, machine_info)
run = lnt.testing.Run(start_time, end_time, info=run_info)
report = lnt.testing.Report(machine, run, test_samples)
lnt_report_file = open(lnt_report_path, 'w')
- print >>lnt_report_file, report.render()
+ print(report.render(), file=lnt_report_file)
lnt_report_file.close()
return report
@@ -1236,7 +1237,7 @@ def _prepare_testsuite_for_rerun(test_name, test_full_path, config):
assert len(to_go) >= 1, "Missing at least one accounting file."
for path in to_go:
- print "Removing:", path
+ print("Removing:", path)
os.remove(path)
@@ -1706,16 +1707,16 @@ class NTTest(builtintest.BuiltinTest):
reports = []
for i in range(opts.multisample):
- print >>sys.stderr, "%s: (multisample) running iteration %d" %\
- (timestamp(), i)
+ print("%s: (multisample) running iteration %d" %\
+ (timestamp(), i), file=sys.stderr)
report = run_test(opts.label, i, config)
reports.append(report)
# Create the merged report.
#
# FIXME: Do a more robust job of merging the reports?
- print >>sys.stderr, "%s: (multisample) creating merged report" % (
- timestamp(),)
+ print("%s: (multisample) creating merged report" % (
+ timestamp(), ), file=sys.stderr)
machine = reports[0].machine
run = reports[0].run
run.end_time = reports[-1].run.end_time
@@ -1726,7 +1727,7 @@ class NTTest(builtintest.BuiltinTest):
lnt_report_path = config.report_path(None)
report = lnt.testing.Report(machine, run, test_samples)
lnt_report_file = open(lnt_report_path, 'w')
- print >>lnt_report_file, report.render()
+ print(report.render(), file=lnt_report_file)
lnt_report_file.close()
else:
@@ -1742,7 +1743,7 @@ class NTTest(builtintest.BuiltinTest):
lnt_report_path = config.report_path(None)
lnt_report_file = open(lnt_report_path, 'w')
- print >>lnt_report_file, test_results.render()
+ print(test_results.render(), file=lnt_report_file)
lnt_report_file.close()
merge_run = 'replace'
diff --git a/lnt/tests/test_suite.py b/lnt/tests/test_suite.py
index 83614a5..ece0eab 100644
--- a/lnt/tests/test_suite.py
+++ b/lnt/tests/test_suite.py
@@ -1,4 +1,5 @@
"""LLVM test-suite"""
+from __future__ import print_function
import subprocess
import tempfile
import json
@@ -971,7 +972,7 @@ class TestSuiteTest(BuiltinTest):
"iprofiler -timeprofiler -I 40u")
cmd_iprofiler = cmd + ['-DTEST_SUITE_RUN_UNDER=' + iprofiler]
- print ' '.join(cmd_iprofiler)
+ print(' '.join(cmd_iprofiler))
out = subprocess.check_output(cmd_iprofiler)
diff --git a/lnt/util/ImportData.py b/lnt/util/ImportData.py
index 9346e8c..6c50749 100644
--- a/lnt/util/ImportData.py
+++ b/lnt/util/ImportData.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
from lnt.util import NTEmailReport
from contextlib import closing
from lnt.util import logger
@@ -200,18 +201,18 @@ def print_report_result(result, out, err, verbose=True):
# Print the generic import information.
if 'import_file' in result:
- print >>out, "Importing %r" % os.path.basename(result['import_file'])
+ print("Importing %r" % os.path.basename(result['import_file']), file=out)
if result['success']:
- print >>out, "Import succeeded."
- print >>out
+ print("Import succeeded.", file=out)
+ print(file=out)
else:
out.flush()
- print >>err, "Import Failed:"
- print >>err, "%s\n" % result['error']
+ print("Import Failed:", file=err)
+ print("%s\n" % result['error'], file=err)
message = result.get('message', None)
if message:
- print >>err, "%s\n" % message
- print >>err, "--------------"
+ print("%s\n" % message, file=err)
+ print("--------------", file=err)
err.flush()
return
@@ -223,15 +224,15 @@ def print_report_result(result, out, err, verbose=True):
# List the parameter sets, if interesting.
show_pset = len(test_results) > 1
if show_pset:
- print >>out, "Parameter Sets"
- print >>out, "--------------"
+ print("Parameter Sets", file=out)
+ print("--------------", file=out)
for i, info in enumerate(test_results):
- print >>out, "P%d: %s" % (i, info['pset'])
- print >>out
+ print("P%d: %s" % (i, info['pset']), file=out)
+ print(file=out)
total_num_tests = sum([len(item['results'])
for item in test_results])
- print >>out, "--- Tested: %d tests --" % total_num_tests
+ print("--- Tested: %d tests --" % total_num_tests, file=out)
test_index = 0
result_kinds = collections.Counter()
for i, item in enumerate(test_results):
@@ -270,51 +271,51 @@ def print_report_result(result, out, err, verbose=True):
if show_pset:
name = 'P%d :: %s' % (i, name)
- print >>out, "%s: %s (%d of %d)" % (result_string, name,
- test_index, total_num_tests)
+ print("%s: %s (%d of %d)" % (result_string, name,
+ test_index, total_num_tests), file=out)
if result_info:
- print >>out, "%s TEST '%s' %s" % ('*'*20, name, '*'*20)
- print >>out, result_info
- print >>out, "*" * 20
+ print("%s TEST '%s' %s" % ('*'*20, name, '*'*20), file=out)
+ print(result_info, file=out)
+ print("*" * 20, file=out)
if 'original_run' in result:
- print >>out, ("This submission is a duplicate of run %d, "
- "already in the database.") % result['original_run']
- print >>out
+ print(("This submission is a duplicate of run %d, "
+ "already in the database.") % result['original_run'], file=out)
+ print(file=out)
if result['report_to_address']:
- print >>out, "Report emailed to: %r" % result['report_to_address']
- print >>out
+ print("Report emailed to: %r" % result['report_to_address'], file=out)
+ print(file=out)
# Print the processing times.
- print >>out, "Processing Times"
- print >>out, "----------------"
- print >>out, "Load : %.2fs" % result['load_time']
- print >>out, "Import : %.2fs" % result['import_time']
- print >>out, "Report : %.2fs" % result['report_time']
- print >>out, "Total : %.2fs" % result['total_time']
- print >>out
+ print("Processing Times", file=out)
+ print("----------------", file=out)
+ print("Load : %.2fs" % result['load_time'], file=out)
+ print("Import : %.2fs" % result['import_time'], file=out)
+ print("Report : %.2fs" % result['report_time'], file=out)
+ print("Total : %.2fs" % result['total_time'], file=out)
+ print(file=out)
# Print the added database items.
total_added = (result['added_machines'] + result['added_runs'] +
result['added_tests'] + result.get('added_samples', 0))
if total_added:
- print >>out, "Imported Data"
- print >>out, "-------------"
+ print("Imported Data", file=out)
+ print("-------------", file=out)
if result['added_machines']:
- print >>out, "Added Machines: %d" % result['added_machines']
+ print("Added Machines: %d" % result['added_machines'], file=out)
if result['added_runs']:
- print >>out, "Added Runs : %d" % result['added_runs']
+ print("Added Runs : %d" % result['added_runs'], file=out)
if result['added_tests']:
- print >>out, "Added Tests : %d" % result['added_tests']
+ print("Added Tests : %d" % result['added_tests'], file=out)
if result.get('added_samples', 0):
- print >>out, "Added Samples : %d" % result['added_samples']
- print >>out
- print >>out, "Results"
- print >>out, "----------------"
+ print("Added Samples : %d" % result['added_samples'], file=out)
+ print(file=out)
+ print("Results", file=out)
+ print("----------------", file=out)
for kind, count in result_kinds.items():
- print >>out, kind, ":", count
+ print(kind, ":", count, file=out)
def import_from_string(config, db_name, db, session, ts_name, data,
diff --git a/lnt/util/ServerUtil.py b/lnt/util/ServerUtil.py
index d88e503..8042528 100644
--- a/lnt/util/ServerUtil.py
+++ b/lnt/util/ServerUtil.py
@@ -1,6 +1,7 @@
"""
Utility for submitting files to a web server over HTTP.
"""
+from __future__ import print_function
import sys
import urllib
import urllib2
@@ -19,7 +20,7 @@ def _show_json_error(reply):
try:
error = json.loads(reply)
except ValueError:
- print "error: {}".format(reply)
+ print("error: {}".format(reply))
return
sys.stderr.write("error: lnt server: {}\n".format(error.get('error')))
message = error.get('message', '')
@@ -55,13 +56,13 @@ def submitFileToServer(url, file, select_machine=None, merge_run=None):
return json.loads(result_data)
except Exception:
import traceback
- print "Unable to load result, not a valid JSON object."
- print
- print "Traceback:"
+ print("Unable to load result, not a valid JSON object.")
+ print()
+ print("Traceback:")
traceback.print_exc()
- print
- print "Result:"
- print "error:", result_data
+ print()
+ print("Result:")
+ print("error:", result_data)
return
diff --git a/lnt/util/wsgi_restart.py b/lnt/util/wsgi_restart.py
index 2d5ec4f..ec3b486 100644
--- a/lnt/util/wsgi_restart.py
+++ b/lnt/util/wsgi_restart.py
@@ -1,4 +1,5 @@
# This code lifted from the mod_wsgi docs.
+from __future__ import print_function
import os
import sys
import signal
@@ -18,8 +19,8 @@ _lock = threading.Lock()
def _restart(path):
_queue.put(True)
prefix = 'monitor (pid=%d):' % os.getpid()
- print >> sys.stderr, '%s Change detected to \'%s\'.' % (prefix, path)
- print >> sys.stderr, '%s Triggering process restart.' % prefix
+ print('%s Change detected to \'%s\'.' % (prefix, path), file=sys.stderr)
+ print('%s Triggering process restart.' % prefix, file=sys.stderr)
os.kill(os.getpid(), signal.SIGINT)
@@ -116,6 +117,6 @@ def start(interval=1.0):
_lock.acquire()
if not _running:
prefix = 'monitor (pid=%d):' % os.getpid()
- print >> sys.stderr, '%s Starting change monitor.' % prefix
+ print('%s Starting change monitor.' % prefix, file=sys.stderr)
_running = True
_thread.start()