diff --git a/tests/helpers-testrepo.sh b/tests/helpers-testrepo.sh
index 91649f4640bebc9564eb93388fbc061e23ac9c2d_dGVzdHMvaGVscGVycy10ZXN0cmVwby5zaA==..d17e5d78ed675470b91f284e48ab65a0c9b19bc6_dGVzdHMvaGVscGVycy10ZXN0cmVwby5zaA== 100644
--- a/tests/helpers-testrepo.sh
+++ b/tests/helpers-testrepo.sh
@@ -9,6 +9,13 @@
 # The mercurial source repository was typically orignally cloned with the
 # system mercurial installation, and may require extensions or settings from
 # the system installation.
+
+if [ -n "$HGTESTEXTRAEXTENSIONS" ]; then
+    for extension in $HGTESTEXTRAEXTENSIONS; do
+        extraoptions="$extraoptions --config extensions.$extension=!"
+    done
+fi
+
 syshg () {
     (
         syshgenv
@@ -48,6 +55,6 @@
     alias testrepohg=syshg
     alias testrepohgenv=syshgenv
 else
-    alias testrepohg=hg
+    alias testrepohg="hg $extraoptions"
     alias testrepohgenv=:
 fi
diff --git a/tests/heredoctest.py b/tests/heredoctest.py
index 91649f4640bebc9564eb93388fbc061e23ac9c2d_dGVzdHMvaGVyZWRvY3Rlc3QucHk=..d17e5d78ed675470b91f284e48ab65a0c9b19bc6_dGVzdHMvaGVyZWRvY3Rlc3QucHk= 100644
--- a/tests/heredoctest.py
+++ b/tests/heredoctest.py
@@ -1,2 +1,4 @@
+from __future__ import absolute_import, print_function
+
 import sys
 
@@ -1,3 +3,9 @@
 import sys
 
+
+def flush():
+    sys.stdout.flush()
+    sys.stderr.flush()
+
+
 globalvars = {}
@@ -3,6 +11,5 @@
 globalvars = {}
-localvars = {}
 lines = sys.stdin.readlines()
 while lines:
     l = lines.pop(0)
     if l.startswith('SALT'):
@@ -5,9 +12,9 @@
 lines = sys.stdin.readlines()
 while lines:
     l = lines.pop(0)
     if l.startswith('SALT'):
-        print l[:-1]
+        print(l[:-1])
     elif l.startswith('>>> '):
         snippet = l[4:]
         while lines and lines[0].startswith('... '):
             l = lines.pop(0)
@@ -10,7 +17,7 @@
     elif l.startswith('>>> '):
         snippet = l[4:]
         while lines and lines[0].startswith('... '):
             l = lines.pop(0)
-            snippet += "\n" + l[4:]
+            snippet += l[4:]
         c = compile(snippet, '<heredoc>', 'single')
         try:
@@ -15,5 +22,8 @@
         c = compile(snippet, '<heredoc>', 'single')
         try:
-            exec c in globalvars, localvars
-        except Exception, inst:
-            print repr(inst)
+            flush()
+            exec(c, globalvars)
+            flush()
+        except Exception as inst:
+            flush()
+            print(repr(inst))
diff --git a/tests/hghave b/tests/hghave
index 91649f4640bebc9564eb93388fbc061e23ac9c2d_dGVzdHMvaGdoYXZl..d17e5d78ed675470b91f284e48ab65a0c9b19bc6_dGVzdHMvaGdoYXZl 100755
--- a/tests/hghave
+++ b/tests/hghave
@@ -13,8 +13,9 @@
 
 checks = hghave.checks
 
+
 def list_features():
     for name, feature in sorted(checks.items()):
         desc = feature[1]
         print(name + ':', desc)
@@ -16,8 +17,9 @@
 def list_features():
     for name, feature in sorted(checks.items()):
         desc = feature[1]
         print(name + ':', desc)
 
+
 def test_features():
     failed = 0
     for name, feature in checks.items():
@@ -29,4 +31,5 @@
             failed += 1
     return failed
 
+
 parser = optparse.OptionParser("%prog [options] [features]")
@@ -32,8 +35,11 @@
 parser = optparse.OptionParser("%prog [options] [features]")
-parser.add_option("--test-features", action="store_true",
-                  help="test available features")
-parser.add_option("--list-features", action="store_true",
-                  help="list available features")
+parser.add_option(
+    "--test-features", action="store_true", help="test available features"
+)
+parser.add_option(
+    "--list-features", action="store_true", help="list available features"
+)
+
 
 def _loadaddon():
     if 'TESTDIR' in os.environ:
@@ -49,5 +55,6 @@
     sys.path.insert(0, path)
     try:
         import hghaveaddon
+
         assert hghaveaddon  # silence pyflakes
     except BaseException as inst:
@@ -52,7 +59,8 @@
         assert hghaveaddon  # silence pyflakes
     except BaseException as inst:
-        sys.stderr.write('failed 
to import hghaveaddon.py from %r: %s\n' - % (path, inst)) + sys.stderr.write( + 'failed to import hghaveaddon.py from %r: %s\n' % (path, inst) + ) sys.exit(2) sys.path.pop(0) @@ -56,6 +64,7 @@ sys.exit(2) sys.path.pop(0) + if __name__ == '__main__': options, args = parser.parse_args() _loadaddon() diff --git a/tests/hghave.py b/tests/hghave.py index 91649f4640bebc9564eb93388fbc061e23ac9c2d_dGVzdHMvaGdoYXZlLnB5..d17e5d78ed675470b91f284e48ab65a0c9b19bc6_dGVzdHMvaGdoYXZlLnB5 100755 --- a/tests/hghave.py +++ b/tests/hghave.py @@ -1,2 +1,2 @@ -from __future__ import absolute_import +from __future__ import absolute_import, print_function @@ -2,5 +2,5 @@ -import errno +import distutils.version import os import re import socket @@ -16,5 +16,37 @@ "false": (lambda: False, "nail clipper"), } +try: + import msvcrt + + msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) + msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY) +except ImportError: + pass + +stdout = getattr(sys.stdout, 'buffer', sys.stdout) +stderr = getattr(sys.stderr, 'buffer', sys.stderr) + +if sys.version_info[0] >= 3: + + def _bytespath(p): + if p is None: + return p + return p.encode('utf-8') + + def _strpath(p): + if p is None: + return p + return p.decode('utf-8') + + +else: + + def _bytespath(p): + return p + + _strpath = _bytespath + + def check(name, desc): """Registers a check function for a feature.""" @@ -19,5 +51,6 @@ def check(name, desc): """Registers a check function for a feature.""" + def decorator(func): checks[name] = (func, desc) return func @@ -21,5 +54,6 @@ def decorator(func): checks[name] = (func, desc) return func + return decorator @@ -24,5 +58,6 @@ return decorator + def checkvers(name, desc, vers): """Registers a check function for each of a series of versions. @@ -26,8 +61,13 @@ def checkvers(name, desc, vers): """Registers a check function for each of a series of versions. - vers can be a list or an iterator""" + vers can be a list or an iterator. + + Produces a series of feature checks that have the form <name><vers> without + any punctuation (even if there's punctuation in 'vers'; i.e. 
this produces + 'py38', not 'py3.8' or 'py-38').""" + def decorator(func): def funcv(v): def f(): return func(v) @@ -30,5 +70,6 @@ def decorator(func): def funcv(v): def f(): return func(v) + return f @@ -34,6 +75,7 @@ return f + for v in vers: v = str(v) f = funcv(v) checks['%s%s' % (name, v.replace('.', ''))] = (f, desc % v) return func @@ -35,7 +77,8 @@ for v in vers: v = str(v) f = funcv(v) checks['%s%s' % (name, v.replace('.', ''))] = (f, desc % v) return func + return decorator @@ -40,5 +83,6 @@ return decorator + def checkfeatures(features): result = { 'error': [], @@ -69,8 +113,9 @@ return result + def require(features): """Require that features are available, exiting if not.""" result = checkfeatures(features) for missing in result['missing']: @@ -72,7 +117,9 @@ def require(features): """Require that features are available, exiting if not.""" result = checkfeatures(features) for missing in result['missing']: - sys.stderr.write('skipped: unknown feature: %s\n' % missing) + stderr.write( + ('skipped: unknown feature: %s\n' % missing).encode('utf-8') + ) for msg in result['skipped']: @@ -78,3 +125,3 @@ for msg in result['skipped']: - sys.stderr.write('skipped: %s\n' % msg) + stderr.write(('skipped: %s\n' % msg).encode('utf-8')) for msg in result['error']: @@ -80,5 +127,5 @@ for msg in result['error']: - sys.stderr.write('%s\n' % msg) + stderr.write(('%s\n' % msg).encode('utf-8')) if result['missing']: sys.exit(2) @@ -86,8 +133,9 @@ if result['skipped'] or result['error']: sys.exit(1) + def matchoutput(cmd, regexp, ignorestatus=False): """Return the match object if cmd executes successfully and its output is matched by the supplied regular expression. """ r = re.compile(regexp) @@ -89,16 +137,12 @@ def matchoutput(cmd, regexp, ignorestatus=False): """Return the match object if cmd executes successfully and its output is matched by the supplied regular expression. 
""" r = re.compile(regexp) - try: - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - except OSError as e: - if e.errno != errno.ENOENT: - raise - ret = -1 - ret = p.wait() - s = p.stdout.read() + p = subprocess.Popen( + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT + ) + s = p.communicate()[0] + ret = p.returncode return (ignorestatus or not ret) and r.search(s) @@ -103,6 +147,7 @@ return (ignorestatus or not ret) and r.search(s) + @check("baz", "GNU Arch baz client") def has_baz(): return matchoutput('baz --version 2>&1', br'baz Bazaar version') @@ -105,7 +150,8 @@ @check("baz", "GNU Arch baz client") def has_baz(): return matchoutput('baz --version 2>&1', br'baz Bazaar version') + @check("bzr", "Canonical's Bazaar client") def has_bzr(): try: @@ -114,8 +160,9 @@ import bzrlib.errors import bzrlib.revision import bzrlib.revisionspec + bzrlib.revisionspec.RevisionSpec return bzrlib.__doc__ is not None except (AttributeError, ImportError): return False @@ -117,7 +164,8 @@ bzrlib.revisionspec.RevisionSpec return bzrlib.__doc__ is not None except (AttributeError, ImportError): return False + @checkvers("bzr", "Canonical's Bazaar client >= %s", (1.14,)) def has_bzr_range(v): @@ -122,5 +170,5 @@ @checkvers("bzr", "Canonical's Bazaar client >= %s", (1.14,)) def has_bzr_range(v): - major, minor = v.split('.')[0:2] + major, minor = v.split('rc')[0].split('.')[0:2] try: import bzrlib @@ -125,7 +173,10 @@ try: import bzrlib - return (bzrlib.__doc__ is not None - and bzrlib.version_info[:2] >= (int(major), int(minor))) + + return bzrlib.__doc__ is not None and bzrlib.version_info[:2] >= ( + int(major), + int(minor), + ) except ImportError: return False @@ -129,7 +180,8 @@ except ImportError: return False + @check("chg", "running with chg") def has_chg(): return 'CHGHG' in os.environ @@ -132,9 +184,10 @@ @check("chg", "running with chg") def has_chg(): return 'CHGHG' in os.environ + @check("cvs", "cvs client/server") def has_cvs(): re = br'Concurrent Versions System.*?server' return matchoutput('cvs --version 2>&1', re) and not has_msys() @@ -136,10 +189,11 @@ @check("cvs", "cvs client/server") def has_cvs(): re = br'Concurrent Versions System.*?server' return matchoutput('cvs --version 2>&1', re) and not has_msys() + @check("cvs112", "cvs client/server 1.12.* (not cvsnt)") def has_cvs112(): re = br'Concurrent Versions System \(CVS\) 1.12.*?server' return matchoutput('cvs --version 2>&1', re) and not has_msys() @@ -141,10 +195,11 @@ @check("cvs112", "cvs client/server 1.12.* (not cvsnt)") def has_cvs112(): re = br'Concurrent Versions System \(CVS\) 1.12.*?server' return matchoutput('cvs --version 2>&1', re) and not has_msys() + @check("cvsnt", "cvsnt client/server") def has_cvsnt(): re = br'Concurrent Versions System \(CVSNT\) (\d+).(\d+).*\(client/server\)' return matchoutput('cvsnt --version 2>&1', re) @@ -146,9 +201,10 @@ @check("cvsnt", "cvsnt client/server") def has_cvsnt(): re = br'Concurrent Versions System \(CVSNT\) (\d+).(\d+).*\(client/server\)' return matchoutput('cvsnt --version 2>&1', re) + @check("darcs", "darcs client") def has_darcs(): return matchoutput('darcs --version', br'\b2\.([2-9]|\d{2})', True) @@ -151,7 +207,8 @@ @check("darcs", "darcs client") def has_darcs(): return matchoutput('darcs --version', br'\b2\.([2-9]|\d{2})', True) + @check("mtn", "monotone client (>= 1.0)") def has_mtn(): return matchoutput('mtn --version', br'monotone', True) and not matchoutput( @@ -155,7 +212,9 @@ @check("mtn", "monotone client 
(>= 1.0)") def has_mtn(): return matchoutput('mtn --version', br'monotone', True) and not matchoutput( - 'mtn --version', br'monotone 0\.', True) + 'mtn --version', br'monotone 0\.', True + ) + @check("eol-in-paths", "end-of-lines in paths") def has_eol_in_paths(): @@ -167,6 +226,7 @@ except (IOError, OSError): return False + @check("execbit", "executable bit") def has_executablebit(): try: @@ -177,7 +237,7 @@ m = os.stat(fn).st_mode & 0o777 new_file_has_exec = m & EXECFLAGS os.chmod(fn, m ^ EXECFLAGS) - exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0o777) == m) + exec_flags_cannot_flip = (os.stat(fn).st_mode & 0o777) == m finally: os.unlink(fn) except (IOError, OSError): @@ -185,6 +245,7 @@ return False return not (new_file_has_exec or exec_flags_cannot_flip) + @check("icasefs", "case insensitive file system") def has_icasefs(): # Stolen from mercurial.util @@ -204,6 +265,7 @@ finally: os.remove(path) + @check("fifo", "named pipes") def has_fifo(): if getattr(os, "mkfifo", None) is None: @@ -216,7 +278,8 @@ except OSError: return False + @check("killdaemons", 'killdaemons.py support') def has_killdaemons(): return True @@ -219,7 +282,8 @@ @check("killdaemons", 'killdaemons.py support') def has_killdaemons(): return True + @check("cacheable", "cacheable filesystem") def has_cacheable_fs(): from mercurial import util @@ -231,7 +295,8 @@ finally: os.remove(path) + @check("lsprof", "python lsprof module") def has_lsprof(): try: import _lsprof @@ -234,9 +299,10 @@ @check("lsprof", "python lsprof module") def has_lsprof(): try: import _lsprof - _lsprof.Profiler # silence unused import warning + + _lsprof.Profiler # silence unused import warning return True except ImportError: return False @@ -239,10 +305,11 @@ return True except ImportError: return False + def gethgversion(): m = matchoutput('hg --version --quiet 2>&1', br'(\d+)\.(\d+)') if not m: return (0, 0) return (int(m.group(1)), int(m.group(2))) @@ -243,12 +310,14 @@ def gethgversion(): m = matchoutput('hg --version --quiet 2>&1', br'(\d+)\.(\d+)') if not m: return (0, 0) return (int(m.group(1)), int(m.group(2))) -@checkvers("hg", "Mercurial >= %s", - list([(1.0 * x) / 10 for x in range(9, 99)])) + +@checkvers( + "hg", "Mercurial >= %s", list([(1.0 * x) / 10 for x in range(9, 99)]) +) def has_hg_range(v): major, minor = v.split('.')[0:2] return gethgversion() >= (int(major), int(minor)) @@ -251,10 +320,11 @@ def has_hg_range(v): major, minor = v.split('.')[0:2] return gethgversion() >= (int(major), int(minor)) + @check("hg08", "Mercurial >= 0.8") def has_hg08(): if checks["hg09"][0](): return True return matchoutput('hg help annotate 2>&1', '--date') @@ -255,12 +325,13 @@ @check("hg08", "Mercurial >= 0.8") def has_hg08(): if checks["hg09"][0](): return True return matchoutput('hg help annotate 2>&1', '--date') + @check("hg07", "Mercurial >= 0.7") def has_hg07(): if checks["hg08"][0](): return True return matchoutput('hg --version --quiet 2>&1', 'Mercurial Distributed SCM') @@ -261,12 +332,13 @@ @check("hg07", "Mercurial >= 0.7") def has_hg07(): if checks["hg08"][0](): return True return matchoutput('hg --version --quiet 2>&1', 'Mercurial Distributed SCM') + @check("hg06", "Mercurial >= 0.6") def has_hg06(): if checks["hg07"][0](): return True return matchoutput('hg --version --quiet 2>&1', 'Mercurial version') @@ -267,10 +339,11 @@ @check("hg06", "Mercurial >= 0.6") def has_hg06(): if checks["hg07"][0](): return True return matchoutput('hg --version --quiet 2>&1', 'Mercurial version') + @check("gettext", "GNU Gettext (msgfmt)") def 
has_gettext(): return matchoutput('msgfmt --version', br'GNU gettext-tools') @@ -273,8 +346,9 @@ @check("gettext", "GNU Gettext (msgfmt)") def has_gettext(): return matchoutput('msgfmt --version', br'GNU gettext-tools') + @check("git", "git command line client") def has_git(): return matchoutput('git --version 2>&1', br'^git version') @@ -277,10 +351,11 @@ @check("git", "git command line client") def has_git(): return matchoutput('git --version 2>&1', br'^git version') + def getgitversion(): m = matchoutput('git --version 2>&1', br'git version (\d+)\.(\d+)') if not m: return (0, 0) return (int(m.group(1)), int(m.group(2))) @@ -281,10 +356,11 @@ def getgitversion(): m = matchoutput('git --version 2>&1', br'git version (\d+)\.(\d+)') if not m: return (0, 0) return (int(m.group(1)), int(m.group(2))) -# https://github.com/git-lfs/lfs-test-server <https://github.com/git-lfs/lfs-test-server> + +# https://github.com/git-lfs/lfs-test-server @check("lfs-test-server", "git-lfs test server") def has_lfsserver(): exe = 'lfs-test-server' @@ -295,8 +371,9 @@ for path in os.environ["PATH"].split(os.pathsep) ) + @checkvers("git", "git client (with ext::sh support) version >= %s", (1.9,)) def has_git_range(v): major, minor = v.split('.')[0:2] return getgitversion() >= (int(major), int(minor)) @@ -298,8 +375,9 @@ @checkvers("git", "git client (with ext::sh support) version >= %s", (1.9,)) def has_git_range(v): major, minor = v.split('.')[0:2] return getgitversion() >= (int(major), int(minor)) + @checkvers("gitbefore", "git client (with ext::sh support) version < %s", (2.24,)) def has_git_less(v): @@ -310,8 +388,9 @@ def has_docutils(): try: import docutils.core - docutils.core.publish_cmdline # silence unused import + + docutils.core.publish_cmdline # silence unused import return True except ImportError: return False @@ -314,10 +393,11 @@ return True except ImportError: return False + def getsvnversion(): m = matchoutput('svn --version --quiet 2>&1', br'^(\d+)\.(\d+)') if not m: return (0, 0) return (int(m.group(1)), int(m.group(2))) @@ -318,11 +398,12 @@ def getsvnversion(): m = matchoutput('svn --version --quiet 2>&1', br'^(\d+)\.(\d+)') if not m: return (0, 0) return (int(m.group(1)), int(m.group(2))) + @checkvers("svn", "subversion client and admin tools >= %s", (1.3, 1.5)) def has_svn_range(v): major, minor = v.split('.')[0:2] return getsvnversion() >= (int(major), int(minor)) @@ -324,7 +405,8 @@ @checkvers("svn", "subversion client and admin tools >= %s", (1.3, 1.5)) def has_svn_range(v): major, minor = v.split('.')[0:2] return getsvnversion() >= (int(major), int(minor)) + @check("svn", "subversion client and admin tools") def has_svn(): @@ -329,9 +411,11 @@ @check("svn", "subversion client and admin tools") def has_svn(): - return matchoutput('svn --version 2>&1', br'^svn, version') and \ - matchoutput('svnadmin --version 2>&1', br'^svnadmin, version') + return matchoutput('svn --version 2>&1', br'^svn, version') and matchoutput( + 'svnadmin --version 2>&1', br'^svnadmin, version' + ) + @check("svn-bindings", "subversion python bindings") def has_svn_bindings(): try: import svn.core @@ -333,8 +417,9 @@ @check("svn-bindings", "subversion python bindings") def has_svn_bindings(): try: import svn.core + version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR if version < (1, 4): return False @@ -342,5 +427,6 @@ except ImportError: return False + @check("p4", "Perforce server and client") def has_p4(): @@ -345,7 +431,9 @@ @check("p4", "Perforce server and client") def has_p4(): - return (matchoutput('p4 
-V', br'Rev\. P4/') and - matchoutput('p4d -V', br'Rev\. P4D/')) + return matchoutput('p4 -V', br'Rev\. P4/') and matchoutput( + 'p4d -V', br'Rev\. P4D/' + ) + @check("symlink", "symbolic links") def has_symlink(): @@ -349,7 +437,8 @@ @check("symlink", "symbolic links") def has_symlink(): - if getattr(os, "symlink", None) is None: + # mercurial.windows.checklink() is a hard 'no' at the moment + if os.name == 'nt' or getattr(os, "symlink", None) is None: return False name = tempfile.mktemp(dir='.', prefix=tempprefix) try: @@ -359,6 +448,7 @@ except (OSError, AttributeError): return False + @check("hardlink", "hardlinks") def has_hardlink(): from mercurial import util @@ -362,7 +452,8 @@ @check("hardlink", "hardlinks") def has_hardlink(): from mercurial import util + fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix) os.close(fh) name = tempfile.mktemp(dir='.', prefix=tempprefix) try: @@ -365,8 +456,8 @@ fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix) os.close(fh) name = tempfile.mktemp(dir='.', prefix=tempprefix) try: - util.oslink(fn, name) + util.oslink(_bytespath(fn), _bytespath(name)) os.unlink(name) return True except OSError: @@ -374,6 +465,7 @@ finally: os.unlink(fn) + @check("hardlink-whitelisted", "hardlinks on whitelisted filesystems") def has_hardlink_whitelisted(): from mercurial import util @@ -377,4 +469,5 @@ @check("hardlink-whitelisted", "hardlinks on whitelisted filesystems") def has_hardlink_whitelisted(): from mercurial import util + try: @@ -380,6 +473,6 @@ try: - fstype = util.getfstype('.') + fstype = util.getfstype(b'.') except OSError: return False return fstype in util._hardlinkfswhitelist @@ -382,7 +475,8 @@ except OSError: return False return fstype in util._hardlinkfswhitelist + @check("rmcwd", "can remove current working directory") def has_rmcwd(): ocwd = os.getcwd() @@ -403,7 +497,8 @@ except OSError: pass + @check("tla", "GNU Arch tla client") def has_tla(): return matchoutput('tla --version 2>&1', br'The GNU Arch Revision') @@ -406,8 +501,9 @@ @check("tla", "GNU Arch tla client") def has_tla(): return matchoutput('tla --version 2>&1', br'The GNU Arch Revision') + @check("gpg", "gpg client") def has_gpg(): return matchoutput('gpg --version 2>&1', br'GnuPG') @@ -410,8 +506,9 @@ @check("gpg", "gpg client") def has_gpg(): return matchoutput('gpg --version 2>&1', br'GnuPG') + @check("gpg2", "gpg client v2") def has_gpg2(): return matchoutput('gpg --version 2>&1', br'GnuPG[^0-9]+2\.') @@ -414,8 +511,9 @@ @check("gpg2", "gpg client v2") def has_gpg2(): return matchoutput('gpg --version 2>&1', br'GnuPG[^0-9]+2\.') + @check("gpg21", "gpg client v2.1+") def has_gpg21(): return matchoutput('gpg --version 2>&1', br'GnuPG[^0-9]+2\.(?!0)') @@ -418,7 +516,8 @@ @check("gpg21", "gpg client v2.1+") def has_gpg21(): return matchoutput('gpg --version 2>&1', br'GnuPG[^0-9]+2\.(?!0)') + @check("unix-permissions", "unix-style permissions") def has_unix_permissions(): d = tempfile.mkdtemp(dir='.', prefix=tempprefix) @@ -436,7 +535,8 @@ finally: os.rmdir(d) + @check("unix-socket", "AF_UNIX socket family") def has_unix_socket(): return getattr(socket, 'AF_UNIX', None) is not None @@ -439,8 +539,9 @@ @check("unix-socket", "AF_UNIX socket family") def has_unix_socket(): return getattr(socket, 'AF_UNIX', None) is not None + @check("root", "root permissions") def has_root(): return getattr(os, 'geteuid', None) and os.geteuid() == 0 @@ -443,6 +544,7 @@ @check("root", "root permissions") def has_root(): return getattr(os, 'geteuid', None) and os.geteuid() == 0 + @check("pyflakes", 
"Pyflakes python linter") def has_pyflakes(): @@ -447,8 +549,11 @@ @check("pyflakes", "Pyflakes python linter") def has_pyflakes(): - return matchoutput("sh -c \"echo 'import re' 2>&1 | pyflakes\"", - br"<stdin>:1: 're' imported but unused", - True) + return matchoutput( + "sh -c \"echo 'import re' 2>&1 | pyflakes\"", + br"<stdin>:1: 're' imported but unused", + True, + ) + @check("pylint", "Pylint python linter") def has_pylint(): @@ -452,9 +557,8 @@ @check("pylint", "Pylint python linter") def has_pylint(): - return matchoutput("pylint --help", - br"Usage: pylint", - True) + return matchoutput("pylint --help", br"Usage: pylint", True) + @check("clang-format", "clang-format C code formatter") def has_clang_format(): @@ -458,10 +562,12 @@ @check("clang-format", "clang-format C code formatter") def has_clang_format(): - return matchoutput("clang-format --help", - br"^OVERVIEW: A tool to format C/C\+\+[^ ]+ code.") + m = matchoutput('clang-format --version', br'clang-format version (\d)') + # style changed somewhere between 4.x and 6.x + return m and int(m.group(1)) >= 6 + @check("jshint", "JSHint static code analysis tool") def has_jshint(): return matchoutput("jshint --version 2>&1", br"jshint v") @@ -463,9 +569,10 @@ @check("jshint", "JSHint static code analysis tool") def has_jshint(): return matchoutput("jshint --version 2>&1", br"jshint v") + @check("pygments", "Pygments source highlighting library") def has_pygments(): try: import pygments @@ -468,9 +575,10 @@ @check("pygments", "Pygments source highlighting library") def has_pygments(): try: import pygments - pygments.highlight # silence unused import warning + + pygments.highlight # silence unused import warning return True except ImportError: return False @@ -473,7 +581,24 @@ return True except ImportError: return False + +@check("pygments25", "Pygments version >= 2.5") +def pygments25(): + try: + import pygments + + v = pygments.__version__ + except ImportError: + return False + + parts = v.split(".") + major = int(parts[0]) + minor = int(parts[1]) + + return (major, minor) >= (2, 5) + + @check("outer-repo", "outer repo") def has_outer_repo(): # failing for other reasons than 'no repo' imply that there is a repo @@ -477,10 +602,10 @@ @check("outer-repo", "outer repo") def has_outer_repo(): # failing for other reasons than 'no repo' imply that there is a repo - return not matchoutput('hg root 2>&1', - br'abort: no repository found', True) + return not matchoutput('hg root 2>&1', br'abort: no repository found', True) + @check("ssl", "ssl module available") def has_ssl(): try: import ssl @@ -482,10 +607,11 @@ @check("ssl", "ssl module available") def has_ssl(): try: import ssl + ssl.CERT_NONE return True except ImportError: return False @@ -487,9 +613,10 @@ ssl.CERT_NONE return True except ImportError: return False + @check("sslcontext", "python >= 2.7.9 ssl") def has_sslcontext(): try: import ssl @@ -492,9 +619,10 @@ @check("sslcontext", "python >= 2.7.9 ssl") def has_sslcontext(): try: import ssl + ssl.SSLContext return True except (ImportError, AttributeError): return False @@ -496,8 +624,9 @@ ssl.SSLContext return True except (ImportError, AttributeError): return False + @check("defaultcacerts", "can verify SSL certs by system's CA certs store") def has_defaultcacerts(): from mercurial import sslutil, ui as uimod @@ -501,6 +630,7 @@ @check("defaultcacerts", "can verify SSL certs by system's CA certs store") def has_defaultcacerts(): from mercurial import sslutil, ui as uimod + ui = uimod.ui.load() return 
sslutil._defaultcacerts(ui) or sslutil._canloaddefaultcerts @@ -504,6 +634,7 @@ ui = uimod.ui.load() return sslutil._defaultcacerts(ui) or sslutil._canloaddefaultcerts + @check("defaultcacertsloaded", "detected presence of loaded system CA certs") def has_defaultcacertsloaded(): import ssl @@ -524,6 +655,7 @@ return len(ctx.get_ca_certs()) > 0 + @check("tls1.2", "TLS 1.2 protocol support") def has_tls1_2(): from mercurial import sslutil @@ -527,9 +659,11 @@ @check("tls1.2", "TLS 1.2 protocol support") def has_tls1_2(): from mercurial import sslutil - return 'tls1.2' in sslutil.supportedprotocols + + return b'tls1.2' in sslutil.supportedprotocols + @check("windows", "Windows") def has_windows(): return os.name == 'nt' @@ -531,9 +665,10 @@ @check("windows", "Windows") def has_windows(): return os.name == 'nt' + @check("system-sh", "system() uses sh") def has_system_sh(): return os.name != 'nt' @@ -536,8 +671,9 @@ @check("system-sh", "system() uses sh") def has_system_sh(): return os.name != 'nt' + @check("serve", "platform and python can manage 'hg serve -d'") def has_serve(): return True @@ -540,9 +676,10 @@ @check("serve", "platform and python can manage 'hg serve -d'") def has_serve(): return True + @check("test-repo", "running tests from repository") def has_test_repo(): t = os.environ["TESTDIR"] return os.path.isdir(os.path.join(t, "..", ".hg")) @@ -544,9 +681,10 @@ @check("test-repo", "running tests from repository") def has_test_repo(): t = os.environ["TESTDIR"] return os.path.isdir(os.path.join(t, "..", ".hg")) + @check("tic", "terminfo compiler and curses module") def has_tic(): try: import curses @@ -549,9 +687,10 @@ @check("tic", "terminfo compiler and curses module") def has_tic(): try: import curses + curses.COLOR_BLUE return matchoutput('test -x "`which tic`"', br'') except ImportError: return False @@ -553,9 +692,17 @@ curses.COLOR_BLUE return matchoutput('test -x "`which tic`"', br'') except ImportError: return False + +@check("xz", "xz compression utility") +def has_xz(): + # When Windows invokes a subprocess in shell mode, it uses `cmd.exe`, which + # only knows `where`, not `which`. So invoke MSYS shell explicitly. 
+ return matchoutput("sh -c 'test -x \"`which xz`\"'", b'') + + @check("msys", "Windows with MSYS") def has_msys(): return os.getenv('MSYSTEM') @@ -558,8 +705,9 @@ @check("msys", "Windows with MSYS") def has_msys(): return os.getenv('MSYSTEM') + @check("aix", "AIX") def has_aix(): return sys.platform.startswith("aix") @@ -562,8 +710,9 @@ @check("aix", "AIX") def has_aix(): return sys.platform.startswith("aix") + @check("osx", "OS X") def has_osx(): return sys.platform == 'darwin' @@ -566,7 +715,8 @@ @check("osx", "OS X") def has_osx(): return sys.platform == 'darwin' + @check("osxpackaging", "OS X packaging tools") def has_osxpackaging(): try: @@ -570,13 +720,14 @@ @check("osxpackaging", "OS X packaging tools") def has_osxpackaging(): try: - return (matchoutput('pkgbuild', br'Usage: pkgbuild ', ignorestatus=1) - and matchoutput( - 'productbuild', br'Usage: productbuild ', - ignorestatus=1) - and matchoutput('lsbom', br'Usage: lsbom', ignorestatus=1) - and matchoutput( - 'xar --help', br'Usage: xar', ignorestatus=1)) + return ( + matchoutput('pkgbuild', br'Usage: pkgbuild ', ignorestatus=1) + and matchoutput( + 'productbuild', br'Usage: productbuild ', ignorestatus=1 + ) + and matchoutput('lsbom', br'Usage: lsbom', ignorestatus=1) + and matchoutput('xar --help', br'Usage: xar', ignorestatus=1) + ) except ImportError: return False @@ -580,8 +731,9 @@ except ImportError: return False + @check('linuxormacos', 'Linux or MacOS') def has_linuxormacos(): # This isn't a perfect test for MacOS. But it is sufficient for our needs. return sys.platform.startswith(('linux', 'darwin')) @@ -583,8 +735,9 @@ @check('linuxormacos', 'Linux or MacOS') def has_linuxormacos(): # This isn't a perfect test for MacOS. But it is sufficient for our needs. return sys.platform.startswith(('linux', 'darwin')) + @check("docker", "docker support") def has_docker(): pat = br'A self-sufficient runtime for' @@ -602,7 +755,8 @@ return True return False + @check("debhelper", "debian packaging tools") def has_debhelper(): # Some versions of dpkg say `dpkg', some say 'dpkg' (` vs ' on the first # quote), so just accept anything in that spot. @@ -605,12 +759,15 @@ @check("debhelper", "debian packaging tools") def has_debhelper(): # Some versions of dpkg say `dpkg', some say 'dpkg' (` vs ' on the first # quote), so just accept anything in that spot. - dpkg = matchoutput('dpkg --version', - br"Debian .dpkg' package management program") - dh = matchoutput('dh --help', - br'dh is a part of debhelper.', ignorestatus=True) - dh_py2 = matchoutput('dh_python2 --help', - br'other supported Python versions') + dpkg = matchoutput( + 'dpkg --version', br"Debian .dpkg' package management program" + ) + dh = matchoutput( + 'dh --help', br'dh is a part of debhelper.', ignorestatus=True + ) + dh_py2 = matchoutput( + 'dh_python2 --help', br'other supported Python versions' + ) # debuild comes from the 'devscripts' package, though you might want # the 'build-debs' package instead, which has a dependency on devscripts. @@ -615,6 +772,7 @@ # debuild comes from the 'devscripts' package, though you might want # the 'build-debs' package instead, which has a dependency on devscripts. 
- debuild = matchoutput('debuild --help', - br'to run debian/rules with given parameter') + debuild = matchoutput( + 'debuild --help', br'to run debian/rules with given parameter' + ) return dpkg and dh and dh_py2 and debuild @@ -619,6 +777,8 @@ return dpkg and dh and dh_py2 and debuild -@check("debdeps", - "debian build dependencies (run dpkg-checkbuilddeps in contrib/)") + +@check( + "debdeps", "debian build dependencies (run dpkg-checkbuilddeps in contrib/)" +) def has_debdeps(): # just check exit status (ignoring output) @@ -623,5 +783,5 @@ def has_debdeps(): # just check exit status (ignoring output) - path = '%s/../contrib/debian/control' % os.environ['TESTDIR'] + path = '%s/../contrib/packaging/debian/control' % os.environ['TESTDIR'] return matchoutput('dpkg-checkbuilddeps %s' % path, br'') @@ -626,5 +786,6 @@ return matchoutput('dpkg-checkbuilddeps %s' % path, br'') + @check("demandimport", "demandimport enabled") def has_demandimport(): # chg disables demandimport intentionally for performance wins. @@ -628,5 +789,6 @@ @check("demandimport", "demandimport enabled") def has_demandimport(): # chg disables demandimport intentionally for performance wins. - return ((not has_chg()) and os.environ.get('HGDEMANDIMPORT') != 'disable') + return (not has_chg()) and os.environ.get('HGDEMANDIMPORT') != 'disable' + @@ -632,5 +794,15 @@ -@check("py3k", "running with Python 3.x") -def has_py3k(): +# Add "py27", "py35", ... as possible feature checks. Note that there's no +# punctuation here. +@checkvers("py", "Python >= %s", (2.7, 3.5, 3.6, 3.7, 3.8, 3.9)) +def has_python_range(v): + major, minor = v.split('.')[0:2] + py_major, py_minor = sys.version_info.major, sys.version_info.minor + + return (py_major, py_minor) >= (int(major), int(minor)) + + +@check("py3", "running with Python 3.x") +def has_py3(): return 3 == sys.version_info[0] @@ -635,4 +807,5 @@ return 3 == sys.version_info[0] + @check("py3exe", "a Python 3.x interpreter is available") def has_python3exe(): @@ -637,4 +810,4 @@ @check("py3exe", "a Python 3.x interpreter is available") def has_python3exe(): - return 'PYTHON3' in os.environ + return matchoutput('python3 -V', br'^Python 3.(5|6|7|8|9)') @@ -640,13 +813,4 @@ -@check("py3pygments", "Pygments available on Python 3.x") -def has_py3pygments(): - if has_py3k(): - return has_pygments() - elif has_python3exe(): - # just check exit status (ignoring output) - py3 = os.environ['PYTHON3'] - return matchoutput('%s -c "import pygments"' % py3, br'') - return False @check("pure", "running with pure Python code") def has_pure(): @@ -650,12 +814,15 @@ @check("pure", "running with pure Python code") def has_pure(): - return any([ - os.environ.get("HGMODULEPOLICY") == "py", - os.environ.get("HGTEST_RUN_TESTS_PURE") == "--pure", - ]) + return any( + [ + os.environ.get("HGMODULEPOLICY") == "py", + os.environ.get("HGTEST_RUN_TESTS_PURE") == "--pure", + ] + ) + @check("slow", "allow slow tests (use --allow-slow-tests)") def has_slow(): return os.environ.get('HGTEST_SLOW') == 'slow' @@ -657,9 +824,10 @@ @check("slow", "allow slow tests (use --allow-slow-tests)") def has_slow(): return os.environ.get('HGTEST_SLOW') == 'slow' + @check("hypothesis", "Hypothesis automated test generation") def has_hypothesis(): try: import hypothesis @@ -662,9 +830,10 @@ @check("hypothesis", "Hypothesis automated test generation") def has_hypothesis(): try: import hypothesis + hypothesis.given return True except ImportError: return False @@ -666,9 +835,10 @@ hypothesis.given return True except ImportError: return 
False + @check("unziplinks", "unzip(1) understands and extracts symlinks") def unzip_understands_symlinks(): return matchoutput('unzip --help', br'Info-ZIP') @@ -671,8 +841,9 @@ @check("unziplinks", "unzip(1) understands and extracts symlinks") def unzip_understands_symlinks(): return matchoutput('unzip --help', br'Info-ZIP') + @check("zstd", "zstd Python module available") def has_zstd(): try: import mercurial.zstd @@ -675,9 +846,10 @@ @check("zstd", "zstd Python module available") def has_zstd(): try: import mercurial.zstd + mercurial.zstd.__version__ return True except ImportError: return False @@ -679,9 +851,10 @@ mercurial.zstd.__version__ return True except ImportError: return False + @check("devfull", "/dev/full special file") def has_dev_full(): return os.path.exists('/dev/full') @@ -684,8 +857,20 @@ @check("devfull", "/dev/full special file") def has_dev_full(): return os.path.exists('/dev/full') + +@check("ensurepip", "ensurepip module") +def has_ensurepip(): + try: + import ensurepip + + ensurepip.bootstrap + return True + except ImportError: + return False + + @check("virtualenv", "Python virtualenv support") def has_virtualenv(): try: import virtualenv @@ -688,9 +873,10 @@ @check("virtualenv", "Python virtualenv support") def has_virtualenv(): try: import virtualenv + virtualenv.ACTIVATE_SH return True except ImportError: return False @@ -692,9 +878,10 @@ virtualenv.ACTIVATE_SH return True except ImportError: return False + @check("fsmonitor", "running tests with fsmonitor") def has_fsmonitor(): return 'HGFSMONITOR_TESTS' in os.environ @@ -697,8 +884,9 @@ @check("fsmonitor", "running tests with fsmonitor") def has_fsmonitor(): return 'HGFSMONITOR_TESTS' in os.environ + @check("fuzzywuzzy", "Fuzzy string matching library") def has_fuzzywuzzy(): try: import fuzzywuzzy @@ -701,9 +889,10 @@ @check("fuzzywuzzy", "Fuzzy string matching library") def has_fuzzywuzzy(): try: import fuzzywuzzy + fuzzywuzzy.__version__ return True except ImportError: return False @@ -705,7 +894,8 @@ fuzzywuzzy.__version__ return True except ImportError: return False + @check("clang-libfuzzer", "clang new enough to include libfuzzer") def has_clang_libfuzzer(): @@ -710,7 +900,7 @@ @check("clang-libfuzzer", "clang new enough to include libfuzzer") def has_clang_libfuzzer(): - mat = matchoutput('clang --version', 'clang version (\d)') + mat = matchoutput('clang --version', br'clang version (\d)') if mat: # libfuzzer is new in clang 6 return int(mat.group(1)) > 5 return False @@ -713,4 +903,145 @@ if mat: # libfuzzer is new in clang 6 return int(mat.group(1)) > 5 return False + + +@check("clang-6.0", "clang 6.0 with version suffix (libfuzzer included)") +def has_clang60(): + return matchoutput('clang-6.0 --version', br'clang version 6\.') + + +@check("xdiff", "xdiff algorithm") +def has_xdiff(): + try: + from mercurial import policy + + bdiff = policy.importmod('bdiff') + return bdiff.xdiffblocks(b'', b'') == [(0, 0, 0, 0)] + except (ImportError, AttributeError): + return False + + +@check('extraextensions', 'whether tests are running with extra extensions') +def has_extraextensions(): + return 'HGTESTEXTRAEXTENSIONS' in os.environ + + +def getrepofeatures(): + """Obtain set of repository features in use. + + HGREPOFEATURES can be used to define or remove features. It contains + a space-delimited list of feature strings. Strings beginning with ``-`` + mean to remove. + """ + # Default list provided by core. 
+ features = { + 'bundlerepo', + 'revlogstore', + 'fncache', + } + + # Features that imply other features. + implies = { + 'simplestore': ['-revlogstore', '-bundlerepo', '-fncache'], + } + + for override in os.environ.get('HGREPOFEATURES', '').split(' '): + if not override: + continue + + if override.startswith('-'): + if override[1:] in features: + features.remove(override[1:]) + else: + features.add(override) + + for imply in implies.get(override, []): + if imply.startswith('-'): + if imply[1:] in features: + features.remove(imply[1:]) + else: + features.add(imply) + + return features + + +@check('reporevlogstore', 'repository using the default revlog store') +def has_reporevlogstore(): + return 'revlogstore' in getrepofeatures() + + +@check('reposimplestore', 'repository using simple storage extension') +def has_reposimplestore(): + return 'simplestore' in getrepofeatures() + + +@check('repobundlerepo', 'whether we can open bundle files as repos') +def has_repobundlerepo(): + return 'bundlerepo' in getrepofeatures() + + +@check('repofncache', 'repository has an fncache') +def has_repofncache(): + return 'fncache' in getrepofeatures() + + +@check('sqlite', 'sqlite3 module is available') +def has_sqlite(): + try: + import sqlite3 + + version = sqlite3.sqlite_version_info + except ImportError: + return False + + if version < (3, 8, 3): + # WITH clause not supported + return False + + return matchoutput('sqlite3 -version', br'^3\.\d+') + + +@check('vcr', 'vcr http mocking library') +def has_vcr(): + try: + import vcr + + vcr.VCR + return True + except (ImportError, AttributeError): + pass + return False + + +@check('emacs', 'GNU Emacs') +def has_emacs(): + # Our emacs lisp uses `with-eval-after-load` which is new in emacs + # 24.4, so we allow emacs 24.4, 24.5, and 25+ (24.5 was the last + # 24 release) + return matchoutput('emacs --version', b'GNU Emacs 2(4.4|4.5|5|6|7|8|9)') + + +@check('black', 'the black formatter for python') +def has_black(): + blackcmd = 'black --version' + version_regex = b'black, version ([0-9a-b.]+)' + version = matchoutput(blackcmd, version_regex) + sv = distutils.version.StrictVersion + return version and sv(_strpath(version.group(1))) >= sv('19.10b0') + + +@check('pytype', 'the pytype type checker') +def has_pytype(): + pytypecmd = 'pytype --version' + version = matchoutput(pytypecmd, b'[0-9a-b.]+') + sv = distutils.version.StrictVersion + return version and sv(_strpath(version.group(0))) >= sv('2019.10.17') + + +@check("rustfmt", "rustfmt tool") +def has_rustfmt(): + # We use Nightly's rustfmt due to current unstable config options. 
+ return matchoutput( + '`rustup which --toolchain nightly rustfmt` --version', b'rustfmt' + ) diff --git a/tests/killdaemons.py b/tests/killdaemons.py index 91649f4640bebc9564eb93388fbc061e23ac9c2d_dGVzdHMva2lsbGRhZW1vbnMucHk=..d17e5d78ed675470b91f284e48ab65a0c9b19bc6_dGVzdHMva2lsbGRhZW1vbnMucHk= 100755 --- a/tests/killdaemons.py +++ b/tests/killdaemons.py @@ -1,4 +1,17 @@ #!/usr/bin/env python -import os, sys, time, errno, signal +from __future__ import absolute_import +import errno +import os +import signal +import sys +import time + +if os.name == 'nt': + import ctypes + + _BOOL = ctypes.c_long + _DWORD = ctypes.c_ulong + _UINT = ctypes.c_uint + _HANDLE = ctypes.c_void_p @@ -4,6 +17,18 @@ -if os.name =='nt': - import ctypes + ctypes.windll.kernel32.CloseHandle.argtypes = [_HANDLE] + ctypes.windll.kernel32.CloseHandle.restype = _BOOL + + ctypes.windll.kernel32.GetLastError.argtypes = [] + ctypes.windll.kernel32.GetLastError.restype = _DWORD + + ctypes.windll.kernel32.OpenProcess.argtypes = [_DWORD, _BOOL, _DWORD] + ctypes.windll.kernel32.OpenProcess.restype = _HANDLE + + ctypes.windll.kernel32.TerminateProcess.argtypes = [_HANDLE, _UINT] + ctypes.windll.kernel32.TerminateProcess.restype = _BOOL + + ctypes.windll.kernel32.WaitForSingleObject.argtypes = [_HANDLE, _DWORD] + ctypes.windll.kernel32.WaitForSingleObject.restype = _DWORD def _check(ret, expectederr=None): if ret == 0: @@ -16,6 +41,6 @@ logfn('# Killing daemon process %d' % pid) PROCESS_TERMINATE = 1 PROCESS_QUERY_INFORMATION = 0x400 - SYNCHRONIZE = 0x00100000L + SYNCHRONIZE = 0x00100000 WAIT_OBJECT_0 = 0 WAIT_TIMEOUT = 258 @@ -20,3 +45,4 @@ WAIT_OBJECT_0 = 0 WAIT_TIMEOUT = 258 + WAIT_FAILED = _DWORD(0xFFFFFFFF).value handle = ctypes.windll.kernel32.OpenProcess( @@ -22,9 +48,11 @@ handle = ctypes.windll.kernel32.OpenProcess( - PROCESS_TERMINATE|SYNCHRONIZE|PROCESS_QUERY_INFORMATION, - False, pid) - if handle == 0: - _check(0, 87) # err 87 when process not found - return # process not found, already finished + PROCESS_TERMINATE | SYNCHRONIZE | PROCESS_QUERY_INFORMATION, + False, + pid, + ) + if handle is None: + _check(0, 87) # err 87 when process not found + return # process not found, already finished try: r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100) if r == WAIT_OBJECT_0: @@ -28,6 +56,6 @@ try: r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100) if r == WAIT_OBJECT_0: - pass # terminated, but process handle still available + pass # terminated, but process handle still available elif r == WAIT_TIMEOUT: _check(ctypes.windll.kernel32.TerminateProcess(handle, -1)) @@ -32,8 +60,8 @@ elif r == WAIT_TIMEOUT: _check(ctypes.windll.kernel32.TerminateProcess(handle, -1)) - else: - _check(r) + elif r == WAIT_FAILED: + _check(0) # err stored in GetLastError() # TODO?: forcefully kill when timeout # and ?shorter waiting time? when tryhard==True r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100) @@ -36,6 +64,6 @@ # TODO?: forcefully kill when timeout # and ?shorter waiting time? 
when tryhard==True r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100) - # timeout = 100 ms + # timeout = 100 ms if r == WAIT_OBJECT_0: @@ -41,4 +69,4 @@ if r == WAIT_OBJECT_0: - pass # process is terminated + pass # process is terminated elif r == WAIT_TIMEOUT: logfn('# Daemon process %d is stuck') @@ -43,9 +71,9 @@ elif r == WAIT_TIMEOUT: logfn('# Daemon process %d is stuck') - else: - check(r) # any error - except: #re-raises - ctypes.windll.kernel32.CloseHandle(handle) # no _check, keep error + elif r == WAIT_FAILED: + _check(0) # err stored in GetLastError() + except: # re-raises + ctypes.windll.kernel32.CloseHandle(handle) # no _check, keep error raise _check(ctypes.windll.kernel32.CloseHandle(handle)) @@ -49,4 +77,5 @@ raise _check(ctypes.windll.kernel32.CloseHandle(handle)) + else: @@ -52,4 +81,5 @@ else: + def kill(pid, logfn, tryhard=True): try: os.kill(pid, 0) @@ -64,7 +94,7 @@ os.kill(pid, 0) logfn('# Daemon process %d is stuck - really killing it' % pid) os.kill(pid, signal.SIGKILL) - except OSError, err: + except OSError as err: if err.errno != errno.ESRCH: raise @@ -68,8 +98,9 @@ if err.errno != errno.ESRCH: raise + def killdaemons(pidfile, tryhard=True, remove=False, logfn=None): if not logfn: logfn = lambda s: s # Kill off any leftover daemon processes try: @@ -71,12 +102,21 @@ def killdaemons(pidfile, tryhard=True, remove=False, logfn=None): if not logfn: logfn = lambda s: s # Kill off any leftover daemon processes try: - fp = open(pidfile) - for line in fp: - try: - pid = int(line) - except ValueError: - continue + pids = [] + with open(pidfile) as fp: + for line in fp: + try: + pid = int(line) + if pid <= 0: + raise ValueError + except ValueError: + logfn( + '# Not killing daemon process %s - invalid pid' + % line.rstrip() + ) + continue + pids.append(pid) + for pid in pids: kill(pid, logfn, tryhard) @@ -82,7 +122,6 @@ kill(pid, logfn, tryhard) - fp.close() if remove: os.unlink(pidfile) except IOError: pass @@ -84,6 +123,7 @@ if remove: os.unlink(pidfile) except IOError: pass + if __name__ == '__main__': @@ -89,4 +129,6 @@ if __name__ == '__main__': - path, = sys.argv[1:] - killdaemons(path) + if len(sys.argv) > 1: + (path,) = sys.argv[1:] + else: + path = os.environ["DAEMON_PIDS"] @@ -92,1 +134,2 @@ + killdaemons(path, remove=True) diff --git a/tests/run-tests.py b/tests/run-tests.py index 91649f4640bebc9564eb93388fbc061e23ac9c2d_dGVzdHMvcnVuLXRlc3RzLnB5..d17e5d78ed675470b91f284e48ab65a0c9b19bc6_dGVzdHMvcnVuLXRlc3RzLnB5 100755 --- a/tests/run-tests.py +++ b/tests/run-tests.py @@ -51,6 +51,7 @@ import distutils.version as version import errno import json +import multiprocessing import os import random import re @@ -64,6 +65,7 @@ import threading import time import unittest +import uuid import xml.dom.minidom as minidom try: @@ -73,6 +75,7 @@ try: import shlex + shellquote = shlex.quote except (ImportError, AttributeError): import pipes @@ -76,5 +79,6 @@ shellquote = shlex.quote except (ImportError, AttributeError): import pipes + shellquote = pipes.quote @@ -79,16 +83,7 @@ shellquote = pipes.quote -if os.environ.get('RTUNICODEPEDANTRY', False): - try: - reload(sys) - sys.setdefaultencoding("undefined") - except NameError: - pass - -origenviron = os.environ.copy() -osenvironb = getattr(os, 'environb', os.environ) processlock = threading.Lock() pygmentspresent = False # ANSI color is unsupported prior to Windows 10 if os.name != 'nt': @@ -90,12 +85,12 @@ processlock = threading.Lock() pygmentspresent = False # ANSI color is unsupported prior to Windows 10 if 
os.name != 'nt': - try: # is pygments installed + try: # is pygments installed import pygments import pygments.lexers as lexers import pygments.lexer as lexer import pygments.formatters as formatters import pygments.token as token import pygments.style as style @@ -96,9 +91,10 @@ import pygments import pygments.lexers as lexers import pygments.lexer as lexer import pygments.formatters as formatters import pygments.token as token import pygments.style as style + pygmentspresent = True difflexer = lexers.DiffLexer() terminal256formatter = formatters.Terminal256Formatter() @@ -106,6 +102,7 @@ pass if pygmentspresent: + class TestRunnerStyle(style.Style): default_style = "" skipped = token.string_to_tokentype("Token.Generic.Skipped") @@ -113,10 +110,10 @@ skippedname = token.string_to_tokentype("Token.Generic.SName") failedname = token.string_to_tokentype("Token.Generic.FName") styles = { - skipped: '#e5e5e5', - skippedname: '#00ffff', - failed: '#7f0000', - failedname: '#ff0000', + skipped: '#e5e5e5', + skippedname: '#00ffff', + failed: '#7f0000', + failedname: '#ff0000', } class TestRunnerLexer(lexer.RegexLexer): @@ -120,7 +117,7 @@ } class TestRunnerLexer(lexer.RegexLexer): - testpattern = r'[\w-]+\.(t|py)( \(case [\w-]+\))?' + testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?' tokens = { 'root': [ (r'^Skipped', token.Generic.Skipped, 'skipped'), @@ -134,9 +131,9 @@ 'failed': [ (testpattern, token.Generic.FName), (r'(:| ).*', token.Generic.Failed), - ] + ], } runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle) runnerlexer = TestRunnerLexer() @@ -138,7 +135,9 @@ } runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle) runnerlexer = TestRunnerLexer() +origenviron = os.environ.copy() + if sys.version_info > (3, 5, 0): PYTHON3 = True @@ -143,6 +142,7 @@ if sys.version_info > (3, 5, 0): PYTHON3 = True - xrange = range # we use xrange in one place, and we'd rather not use range + xrange = range # we use xrange in one place, and we'd rather not use range + def _bytespath(p): if p is None: return p @@ -153,4 +153,45 @@ return p return p.decode('utf-8') + osenvironb = getattr(os, 'environb', None) + if osenvironb is None: + # Windows lacks os.environb, for instance. A proxy over the real thing + # instead of a copy allows the environment to be updated via bytes on + # all platforms. 
+ class environbytes(object): + def __init__(self, strenv): + self.__len__ = strenv.__len__ + self.clear = strenv.clear + self._strenv = strenv + + def __getitem__(self, k): + v = self._strenv.__getitem__(_strpath(k)) + return _bytespath(v) + + def __setitem__(self, k, v): + self._strenv.__setitem__(_strpath(k), _strpath(v)) + + def __delitem__(self, k): + self._strenv.__delitem__(_strpath(k)) + + def __contains__(self, k): + return self._strenv.__contains__(_strpath(k)) + + def __iter__(self): + return iter([_bytespath(k) for k in iter(self._strenv)]) + + def get(self, k, default=None): + v = self._strenv.get(_strpath(k), _strpath(default)) + return _bytespath(v) + + def pop(self, k, default=None): + v = self._strenv.pop(_strpath(k), _strpath(default)) + return _bytespath(v) + + osenvironb = environbytes(os.environ) + + getcwdb = getattr(os, 'getcwdb') + if not getcwdb or os.name == 'nt': + getcwdb = lambda: _bytespath(os.getcwd()) + elif sys.version_info >= (3, 0, 0): @@ -156,7 +197,9 @@ elif sys.version_info >= (3, 0, 0): - print('%s is only supported on Python 3.5+ and 2.7, not %s' % - (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))) - sys.exit(70) # EX_SOFTWARE from `man 3 sysexit` + print( + '%s is only supported on Python 3.5+ and 2.7, not %s' + % (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])) + ) + sys.exit(70) # EX_SOFTWARE from `man 3 sysexit` else: PYTHON3 = False @@ -168,6 +211,8 @@ return p _strpath = _bytespath + osenvironb = os.environ + getcwdb = os.getcwd # For Windows support wifexited = getattr(os, "WIFEXITED", lambda x: False) @@ -197,6 +242,7 @@ else: return False + # useipv6 will be set by parseargs useipv6 = None @@ -200,6 +246,7 @@ # useipv6 will be set by parseargs useipv6 = None + def checkportisavailable(port): """return true if a port seems free to bind on localhost""" if useipv6: @@ -212,8 +259,11 @@ s.close() return True except socket.error as exc: - if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL, - errno.EPROTONOSUPPORT): + if exc.errno not in ( + errno.EADDRINUSE, + errno.EADDRNOTAVAIL, + errno.EPROTONOSUPPORT, + ): raise return False @@ -217,4 +267,5 @@ raise return False + closefds = os.name == 'posix' @@ -220,3 +271,5 @@ closefds = os.name == 'posix' + + def Popen4(cmd, wd, timeout, env=None): processlock.acquire() @@ -221,9 +274,16 @@ def Popen4(cmd, wd, timeout, env=None): processlock.acquire() - p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env, - close_fds=closefds, - stdin=subprocess.PIPE, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + p = subprocess.Popen( + _strpath(cmd), + shell=True, + bufsize=-1, + cwd=_strpath(wd), + env=env, + close_fds=closefds, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) processlock.release() p.fromchild = p.stdout @@ -232,6 +292,7 @@ p.timeout = False if timeout: + def t(): start = time.time() while time.time() - start < timeout and p.returncode is None: @@ -235,7 +296,7 @@ def t(): start = time.time() while time.time() - start < timeout and p.returncode is None: - time.sleep(.1) + time.sleep(0.1) p.timeout = True if p.returncode is None: terminate(p) @@ -239,7 +300,8 @@ p.timeout = True if p.returncode is None: terminate(p) + threading.Thread(target=t).start() return p @@ -242,10 +304,20 @@ threading.Thread(target=t).start() return p -PYTHON = _bytespath(sys.executable.replace('\\', '/')) + +if sys.executable: + sysexecutable = sys.executable +elif os.environ.get('PYTHONEXECUTABLE'): + sysexecutable = 
os.environ['PYTHONEXECUTABLE'] +elif os.environ.get('PYTHON'): + sysexecutable = os.environ['PYTHON'] +else: + raise AssertionError('Could not find Python interpreter') + +PYTHON = _bytespath(sysexecutable.replace('\\', '/')) IMPL_PATH = b'PYTHONPATH' if 'java' in sys.platform: IMPL_PATH = b'JYTHONPATH' defaults = { @@ -247,7 +319,7 @@ IMPL_PATH = b'PYTHONPATH' if 'java' in sys.platform: IMPL_PATH = b'JYTHONPATH' defaults = { - 'jobs': ('HGTEST_JOBS', 1), + 'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()), 'timeout': ('HGTEST_TIMEOUT', 180), @@ -253,6 +325,6 @@ 'timeout': ('HGTEST_TIMEOUT', 180), - 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500), + 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 1500), 'port': ('HGTEST_PORT', 20059), 'shell': ('HGTEST_SHELL', 'sh'), } @@ -255,7 +327,8 @@ 'port': ('HGTEST_PORT', 20059), 'shell': ('HGTEST_SHELL', 'sh'), } + def canonpath(path): return os.path.realpath(os.path.expanduser(path)) @@ -259,6 +332,7 @@ def canonpath(path): return os.path.realpath(os.path.expanduser(path)) + def parselistfiles(files, listtype, warn=True): entries = dict() for filename in files: @@ -280,8 +354,9 @@ f.close() return entries + def parsettestcases(path): """read a .t test file, return a set of test case names If path does not exist, return an empty set. """ @@ -283,10 +358,10 @@ def parsettestcases(path): """read a .t test file, return a set of test case names If path does not exist, return an empty set. """ - cases = set() + cases = [] try: with open(path, 'rb') as f: for l in f: if l.startswith(b'#testcases '): @@ -289,10 +364,10 @@ try: with open(path, 'rb') as f: for l in f: if l.startswith(b'#testcases '): - cases.update(l[11:].split()) + cases.append(sorted(l[11:].split())) except IOError as ex: if ex.errno != errno.ENOENT: raise return cases @@ -294,10 +369,11 @@ except IOError as ex: if ex.errno != errno.ENOENT: raise return cases + def getparser(): """Obtain the OptionParser used by the CLI.""" parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]') selection = parser.add_argument_group('Test Selection') @@ -299,23 +375,40 @@ def getparser(): """Obtain the OptionParser used by the CLI.""" parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]') selection = parser.add_argument_group('Test Selection') - selection.add_argument('--allow-slow-tests', action='store_true', - help='allow extremely slow tests') - selection.add_argument("--blacklist", action="append", - help="skip tests listed in the specified blacklist file") - selection.add_argument("--changed", - help="run tests that are changed in parent rev or working directory") - selection.add_argument("-k", "--keywords", - help="run tests matching keywords") - selection.add_argument("-r", "--retest", action="store_true", - help = "retest failed tests") - selection.add_argument("--test-list", action="append", - help="read tests to run from the specified file") - selection.add_argument("--whitelist", action="append", - help="always run tests listed in the specified whitelist file") - selection.add_argument('tests', metavar='TESTS', nargs='*', - help='Tests to run') + selection.add_argument( + '--allow-slow-tests', + action='store_true', + help='allow extremely slow tests', + ) + selection.add_argument( + "--blacklist", + action="append", + help="skip tests listed in the specified blacklist file", + ) + selection.add_argument( + "--changed", + help="run tests that are changed in parent rev or working directory", + ) + selection.add_argument( + "-k", "--keywords", help="run tests matching keywords" + ) 
+ selection.add_argument( + "-r", "--retest", action="store_true", help="retest failed tests" + ) + selection.add_argument( + "--test-list", + action="append", + help="read tests to run from the specified file", + ) + selection.add_argument( + "--whitelist", + action="append", + help="always run tests listed in the specified whitelist file", + ) + selection.add_argument( + 'tests', metavar='TESTS', nargs='*', help='Tests to run' + ) harness = parser.add_argument_group('Test Harness Behavior') @@ -320,8 +413,14 @@ harness = parser.add_argument_group('Test Harness Behavior') - harness.add_argument('--bisect-repo', - metavar='bisect_repo', - help=("Path of a repo to bisect. Use together with " - "--known-good-rev")) - harness.add_argument("-d", "--debug", action="store_true", + harness.add_argument( + '--bisect-repo', + metavar='bisect_repo', + help=( + "Path of a repo to bisect. Use together with " "--known-good-rev" + ), + ) + harness.add_argument( + "-d", + "--debug", + action="store_true", help="debug mode: write output of test scripts to console" @@ -327,8 +426,20 @@ help="debug mode: write output of test scripts to console" - " rather than capturing and diffing it (disables timeout)") - harness.add_argument("-f", "--first", action="store_true", - help="exit on the first test failure") - harness.add_argument("-i", "--interactive", action="store_true", - help="prompt to accept changed output") - harness.add_argument("-j", "--jobs", type=int, + " rather than capturing and diffing it (disables timeout)", + ) + harness.add_argument( + "-f", + "--first", + action="store_true", + help="exit on the first test failure", + ) + harness.add_argument( + "-i", + "--interactive", + action="store_true", + help="prompt to accept changed output", + ) + harness.add_argument( + "-j", + "--jobs", + type=int, help="number of jobs to run in parallel" @@ -334,16 +445,37 @@ help="number of jobs to run in parallel" - " (default: $%s or %d)" % defaults['jobs']) - harness.add_argument("--keep-tmpdir", action="store_true", - help="keep temporary directory after running tests") - harness.add_argument('--known-good-rev', - metavar="known_good_rev", - help=("Automatically bisect any failures using this " - "revision as a known-good revision.")) - harness.add_argument("--list-tests", action="store_true", - help="list tests instead of running them") - harness.add_argument("--loop", action="store_true", - help="loop tests repeatedly") - harness.add_argument('--random', action="store_true", - help='run tests in random order') - harness.add_argument("-p", "--port", type=int, + " (default: $%s or %d)" % defaults['jobs'], + ) + harness.add_argument( + "--keep-tmpdir", + action="store_true", + help="keep temporary directory after running tests", + ) + harness.add_argument( + '--known-good-rev', + metavar="known_good_rev", + help=( + "Automatically bisect any failures using this " + "revision as a known-good revision." 
+ ), + ) + harness.add_argument( + "--list-tests", + action="store_true", + help="list tests instead of running them", + ) + harness.add_argument( + "--loop", action="store_true", help="loop tests repeatedly" + ) + harness.add_argument( + '--random', action="store_true", help='run tests in random order' + ) + harness.add_argument( + '--order-by-runtime', + action="store_true", + help='run slowest tests first, according to .testtimes', + ) + harness.add_argument( + "-p", + "--port", + type=int, help="port on which servers should listen" @@ -349,14 +481,28 @@ help="port on which servers should listen" - " (default: $%s or %d)" % defaults['port']) - harness.add_argument('--profile-runner', action='store_true', - help='run statprof on run-tests') - harness.add_argument("-R", "--restart", action="store_true", - help="restart at last error") - harness.add_argument("--runs-per-test", type=int, dest="runs_per_test", - help="run each test N times (default=1)", default=1) - harness.add_argument("--shell", - help="shell to use (default: $%s or %s)" % defaults['shell']) - harness.add_argument('--showchannels', action='store_true', - help='show scheduling channels') - harness.add_argument("--slowtimeout", type=int, + " (default: $%s or %d)" % defaults['port'], + ) + harness.add_argument( + '--profile-runner', + action='store_true', + help='run statprof on run-tests', + ) + harness.add_argument( + "-R", "--restart", action="store_true", help="restart at last error" + ) + harness.add_argument( + "--runs-per-test", + type=int, + dest="runs_per_test", + help="run each test N times (default=1)", + default=1, + ) + harness.add_argument( + "--shell", help="shell to use (default: $%s or %s)" % defaults['shell'] + ) + harness.add_argument( + '--showchannels', action='store_true', help='show scheduling channels' + ) + harness.add_argument( + "--slowtimeout", + type=int, help="kill errant slow tests after SLOWTIMEOUT seconds" @@ -362,4 +508,8 @@ help="kill errant slow tests after SLOWTIMEOUT seconds" - " (default: $%s or %d)" % defaults['slowtimeout']) - harness.add_argument("-t", "--timeout", type=int, + " (default: $%s or %d)" % defaults['slowtimeout'], + ) + harness.add_argument( + "-t", + "--timeout", + type=int, help="kill errant tests after TIMEOUT seconds" @@ -365,4 +515,6 @@ help="kill errant tests after TIMEOUT seconds" - " (default: $%s or %d)" % defaults['timeout']) - harness.add_argument("--tmpdir", + " (default: $%s or %d)" % defaults['timeout'], + ) + harness.add_argument( + "--tmpdir", help="run tests in the given temporary directory" @@ -368,6 +520,8 @@ help="run tests in the given temporary directory" - " (implies --keep-tmpdir)") - harness.add_argument("-v", "--verbose", action="store_true", - help="output verbose messages") + " (implies --keep-tmpdir)", + ) + harness.add_argument( + "-v", "--verbose", action="store_true", help="output verbose messages" + ) hgconf = parser.add_argument_group('Mercurial Configuration') @@ -372,10 +526,19 @@ hgconf = parser.add_argument_group('Mercurial Configuration') - hgconf.add_argument("--chg", action="store_true", - help="install and use chg wrapper in place of hg") - hgconf.add_argument("--compiler", - help="compiler to build with") - hgconf.add_argument('--extra-config-opt', action="append", default=[], - help='set the given config opt in the test hgrc') - hgconf.add_argument("-l", "--local", action="store_true", + hgconf.add_argument( + "--chg", + action="store_true", + help="install and use chg wrapper in place of hg", + ) + 
hgconf.add_argument("--compiler", help="compiler to build with") + hgconf.add_argument( + '--extra-config-opt', + action="append", + default=[], + help='set the given config opt in the test hgrc', + ) + hgconf.add_argument( + "-l", + "--local", + action="store_true", help="shortcut for --with-hg=<testdir>/../hg, " @@ -381,13 +544,28 @@ help="shortcut for --with-hg=<testdir>/../hg, " - "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set") - hgconf.add_argument("--ipv6", action="store_true", - help="prefer IPv6 to IPv4 for network related tests") - hgconf.add_argument("--pure", action="store_true", - help="use pure Python code instead of C extensions") - hgconf.add_argument("-3", "--py3k-warnings", action="store_true", - help="enable Py3k warnings on Python 2.7+") - hgconf.add_argument("--with-chg", metavar="CHG", - help="use specified chg wrapper in place of hg") - hgconf.add_argument("--with-hg", + "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set", + ) + hgconf.add_argument( + "--ipv6", + action="store_true", + help="prefer IPv6 to IPv4 for network related tests", + ) + hgconf.add_argument( + "--pure", + action="store_true", + help="use pure Python code instead of C extensions", + ) + hgconf.add_argument( + "-3", + "--py3-warnings", + action="store_true", + help="enable Py3k warnings on Python 2.7+", + ) + hgconf.add_argument( + "--with-chg", + metavar="CHG", + help="use specified chg wrapper in place of hg", + ) + hgconf.add_argument( + "--with-hg", metavar="HG", help="test using specified hg script rather than a " @@ -392,10 +570,6 @@ metavar="HG", help="test using specified hg script rather than a " - "temporary installation") - # This option should be deleted once test-check-py3-compat.t and other - # Python 3 tests run with Python 3. 
- hgconf.add_argument("--with-python3", metavar="PYTHON3", - help="Python 3 interpreter (if running under Python 2)" - " (TEMPORARY)") + "temporary installation", + ) reporting = parser.add_argument_group('Results Reporting') @@ -400,6 +574,12 @@ reporting = parser.add_argument_group('Results Reporting') - reporting.add_argument("-C", "--annotate", action="store_true", - help="output files annotated with coverage") - reporting.add_argument("--color", choices=["always", "auto", "never"], + reporting.add_argument( + "-C", + "--annotate", + action="store_true", + help="output files annotated with coverage", + ) + reporting.add_argument( + "--color", + choices=["always", "auto", "never"], default=os.environ.get('HGRUNTESTSCOLOR', 'auto'), @@ -405,25 +585,48 @@ default=os.environ.get('HGRUNTESTSCOLOR', 'auto'), - help="colorisation: always|auto|never (default: auto)") - reporting.add_argument("-c", "--cover", action="store_true", - help="print a test coverage report") - reporting.add_argument('--exceptions', action='store_true', - help='log all exceptions and generate an exception report') - reporting.add_argument("-H", "--htmlcov", action="store_true", - help="create an HTML report of the coverage of the files") - reporting.add_argument("--json", action="store_true", - help="store test result data in 'report.json' file") - reporting.add_argument("--outputdir", - help="directory to write error logs to (default=test directory)") - reporting.add_argument("-n", "--nodiff", action="store_true", - help="skip showing test changes") - reporting.add_argument("-S", "--noskips", action="store_true", - help="don't report skip tests verbosely") - reporting.add_argument("--time", action="store_true", - help="time how long each test takes") - reporting.add_argument("--view", - help="external diff viewer") - reporting.add_argument("--xunit", - help="record xunit results at specified path") + help="colorisation: always|auto|never (default: auto)", + ) + reporting.add_argument( + "-c", + "--cover", + action="store_true", + help="print a test coverage report", + ) + reporting.add_argument( + '--exceptions', + action='store_true', + help='log all exceptions and generate an exception report', + ) + reporting.add_argument( + "-H", + "--htmlcov", + action="store_true", + help="create an HTML report of the coverage of the files", + ) + reporting.add_argument( + "--json", + action="store_true", + help="store test result data in 'report.json' file", + ) + reporting.add_argument( + "--outputdir", + help="directory to write error logs to (default=test directory)", + ) + reporting.add_argument( + "-n", "--nodiff", action="store_true", help="skip showing test changes" + ) + reporting.add_argument( + "-S", + "--noskips", + action="store_true", + help="don't report skip tests verbosely", + ) + reporting.add_argument( + "--time", action="store_true", help="time how long each test takes" + ) + reporting.add_argument("--view", help="external diff viewer") + reporting.add_argument( + "--xunit", help="record xunit results at specified path" + ) for option, (envvar, default) in defaults.items(): defaults[option] = type(default)(os.environ.get(envvar, default)) @@ -431,6 +634,7 @@ return parser + def parseargs(args, parser): """Parse arguments with our OptionParser and validate results.""" options = parser.parse_args(args) @@ -439,11 +643,4 @@ if 'java' in sys.platform or '__pypy__' in sys.modules: options.pure = True - if options.with_hg: - options.with_hg = canonpath(_bytespath(options.with_hg)) - if not 
(os.path.isfile(options.with_hg) and - os.access(options.with_hg, os.X_OK)): - parser.error('--with-hg must specify an executable hg script') - if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']: - sys.stderr.write('warning: --with-hg should specify an hg script\n') if options.local: @@ -449,4 +646,6 @@ if options.local: + if options.with_hg or options.with_chg: + parser.error('--local cannot be used with --with-hg or --with-chg') testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0]))) reporootdir = os.path.dirname(testdir) pathandattrs = [(b'hg', 'with_hg')] @@ -455,12 +654,25 @@ for relpath, attr in pathandattrs: binpath = os.path.join(reporootdir, relpath) if os.name != 'nt' and not os.access(binpath, os.X_OK): - parser.error('--local specified, but %r not found or ' - 'not executable' % binpath) - setattr(options, attr, binpath) + parser.error( + '--local specified, but %r not found or ' + 'not executable' % binpath + ) + setattr(options, attr, _strpath(binpath)) + + if options.with_hg: + options.with_hg = canonpath(_bytespath(options.with_hg)) + if not ( + os.path.isfile(options.with_hg) + and os.access(options.with_hg, os.X_OK) + ): + parser.error('--with-hg must specify an executable hg script') + if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']: + sys.stderr.write('warning: --with-hg should specify an hg script\n') + sys.stderr.flush() if (options.chg or options.with_chg) and os.name == 'nt': parser.error('chg does not work on %s' % os.name) if options.with_chg: options.chg = False # no installation to temporary location options.with_chg = canonpath(_bytespath(options.with_chg)) @@ -461,11 +673,13 @@ if (options.chg or options.with_chg) and os.name == 'nt': parser.error('chg does not work on %s' % os.name) if options.with_chg: options.chg = False # no installation to temporary location options.with_chg = canonpath(_bytespath(options.with_chg)) - if not (os.path.isfile(options.with_chg) and - os.access(options.with_chg, os.X_OK)): + if not ( + os.path.isfile(options.with_chg) + and os.access(options.with_chg, os.X_OK) + ): parser.error('--with-chg must specify a chg executable') if options.chg and options.with_hg: # chg shares installation location with hg @@ -469,7 +683,9 @@ parser.error('--with-chg must specify a chg executable') if options.chg and options.with_hg: # chg shares installation location with hg - parser.error('--chg does not work when --with-hg is specified ' - '(use --with-chg instead)') + parser.error( + '--chg does not work when --with-hg is specified ' + '(use --with-chg instead)' + ) if options.color == 'always' and not pygmentspresent: @@ -474,7 +690,9 @@ if options.color == 'always' and not pygmentspresent: - sys.stderr.write('warning: --color=always ignored because ' - 'pygments is not installed\n') + sys.stderr.write( + 'warning: --color=always ignored because ' + 'pygments is not installed\n' + ) if options.bisect_repo and not options.known_good_rev: parser.error("--bisect-repo cannot be used without --known-good-rev") @@ -484,10 +702,11 @@ useipv6 = checksocketfamily('AF_INET6') else: # only use IPv6 if IPv4 is unavailable and IPv6 is available - useipv6 = ((not checksocketfamily('AF_INET')) - and checksocketfamily('AF_INET6')) + useipv6 = (not checksocketfamily('AF_INET')) and checksocketfamily( + 'AF_INET6' + ) options.anycoverage = options.cover or options.annotate or options.htmlcov if options.anycoverage: try: import coverage @@ -489,8 +708,9 @@ options.anycoverage = options.cover or options.annotate or options.htmlcov 
if options.anycoverage: try: import coverage + covver = version.StrictVersion(coverage.__version__).version if covver < (3, 3): parser.error('coverage options require coverage 3.3 or later') @@ -499,7 +719,8 @@ if options.anycoverage and options.local: # this needs some path mangling somewhere, I guess - parser.error("sorry, coverage options do not work when --local " - "is specified") + parser.error( + "sorry, coverage options do not work when --local " "is specified" + ) if options.anycoverage and options.with_hg: @@ -504,7 +725,8 @@ if options.anycoverage and options.with_hg: - parser.error("sorry, coverage options do not work when --with-hg " - "is specified") + parser.error( + "sorry, coverage options do not work when --with-hg " "is specified" + ) global verbose if options.verbose: @@ -519,7 +741,6 @@ parser.error("-i/--interactive and -d/--debug are incompatible") if options.debug: if options.timeout != defaults['timeout']: - sys.stderr.write( - 'warning: --timeout option ignored with --debug\n') + sys.stderr.write('warning: --timeout option ignored with --debug\n') if options.slowtimeout != defaults['slowtimeout']: sys.stderr.write( @@ -524,5 +745,6 @@ if options.slowtimeout != defaults['slowtimeout']: sys.stderr.write( - 'warning: --slowtimeout option ignored with --debug\n') + 'warning: --slowtimeout option ignored with --debug\n' + ) options.timeout = 0 options.slowtimeout = 0 @@ -527,8 +749,4 @@ options.timeout = 0 options.slowtimeout = 0 - if options.py3k_warnings: - if PYTHON3: - parser.error( - '--py3k-warnings can only be used on Python 2.7') - if options.with_python3: + if options.py3_warnings: if PYTHON3: @@ -534,23 +752,5 @@ if PYTHON3: - parser.error('--with-python3 cannot be used when executing with ' - 'Python 3') - - options.with_python3 = canonpath(options.with_python3) - # Verify Python3 executable is acceptable. - proc = subprocess.Popen([options.with_python3, b'--version'], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - out, _err = proc.communicate() - ret = proc.wait() - if ret != 0: - parser.error('could not determine version of python 3') - if not out.startswith('Python '): - parser.error('unexpected output from python3 --version: %s' % - out) - vers = version.LooseVersion(out[len('Python '):]) - if vers < version.LooseVersion('3.5.0'): - parser.error('--with-python3 version must be 3.5.0 or greater; ' - 'got %s' % out) + parser.error('--py3-warnings can only be used on Python 2.7') if options.blacklist: options.blacklist = parselistfiles(options.blacklist, 'blacklist') @@ -564,6 +764,7 @@ return options + def rename(src, dst): """Like os.rename(), trade atomicity and opened files friendliness for existing destination support. 
@@ -571,6 +772,19 @@ shutil.copy(src, dst) os.remove(src) + +def makecleanable(path): + """Try to fix directory permission recursively so that the entire tree + can be deleted""" + for dirpath, dirnames, _filenames in os.walk(path, topdown=True): + for d in dirnames: + p = os.path.join(dirpath, d) + try: + os.chmod(p, os.stat(p).st_mode & 0o777 | 0o700) # chmod u+rwx + except OSError: + pass + + _unified_diff = difflib.unified_diff if PYTHON3: import functools @@ -574,5 +788,6 @@ _unified_diff = difflib.unified_diff if PYTHON3: import functools + _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff) @@ -577,5 +792,6 @@ _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff) + def getdiff(expected, output, ref, err): servefail = False lines = [] @@ -586,8 +802,9 @@ line = line[:-2] + b'\n' lines.append(line) if not servefail and line.startswith( - b'+ abort: child process failed to start'): + b'+ abort: child process failed to start' + ): servefail = True return servefail, lines @@ -590,5 +807,6 @@ servefail = True return servefail, lines + verbose = False @@ -594,4 +812,6 @@ verbose = False + + def vlog(*msg): """Log only when in verbose mode.""" if verbose is False: @@ -599,6 +819,7 @@ return log(*msg) + # Bytes that break XML even in a CDATA block: control characters 0-31 # sans \t, \n and \r CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]") @@ -607,7 +828,8 @@ # list in group 2, and the preceeding line output in group 1: # # output..output (feature !)\n -optline = re.compile(b'(.*) \((.+?) !\)\n$') +optline = re.compile(br'(.*) \((.+?) !\)\n$') + def cdatasafe(data): """Make a string safe to include in a CDATA block. @@ -619,6 +841,7 @@ """ return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>') + def log(*msg): """Log something to stdout. @@ -632,7 +855,8 @@ print() sys.stdout.flush() + def highlightdiff(line, color): if not color: return line assert pygmentspresent @@ -635,9 +859,11 @@ def highlightdiff(line, color): if not color: return line assert pygmentspresent - return pygments.highlight(line.decode('latin1'), difflexer, - terminal256formatter).encode('latin1') + return pygments.highlight( + line.decode('latin1'), difflexer, terminal256formatter + ).encode('latin1') + def highlightmsg(msg, color): if not color: @@ -645,6 +871,7 @@ assert pygmentspresent return pygments.highlight(msg, runnerlexer, runnerformatter) + def terminate(proc): """Terminate subprocess""" vlog('# Terminating process %d' % proc.pid) @@ -653,5 +880,6 @@ except OSError: pass + def killdaemons(pidfile): import killdaemons as killmod @@ -656,7 +884,8 @@ def killdaemons(pidfile): import killdaemons as killmod - return killmod.killdaemons(pidfile, tryhard=False, remove=True, - logfn=vlog) + + return killmod.killdaemons(pidfile, tryhard=False, remove=True, logfn=vlog) + class Test(unittest.TestCase): """Encapsulates a single, runnable test. @@ -669,14 +898,24 @@ # Status code reserved for skipped tests (used by hghave). 
SKIPPED_STATUS = 80 - def __init__(self, path, outputdir, tmpdir, keeptmpdir=False, - debug=False, - first=False, - timeout=None, - startport=None, extraconfigopts=None, - py3kwarnings=False, shell=None, hgcommand=None, - slowtimeout=None, usechg=False, - useipv6=False): + def __init__( + self, + path, + outputdir, + tmpdir, + keeptmpdir=False, + debug=False, + first=False, + timeout=None, + startport=None, + extraconfigopts=None, + py3warnings=False, + shell=None, + hgcommand=None, + slowtimeout=None, + usechg=False, + useipv6=False, + ): """Create a test from parameters. path is the full path to the file defining the test. @@ -703,7 +942,7 @@ must have the form "key=value" (something understood by hgrc). Values of the form "foo.key=value" will result in "[foo] key=value". - py3kwarnings enables Py3k warnings. + py3warnings enables Py3k warnings. shell is the shell to execute tests in. """ @@ -729,7 +968,7 @@ self._slowtimeout = slowtimeout self._startport = startport self._extraconfigopts = extraconfigopts or [] - self._py3kwarnings = py3kwarnings + self._py3warnings = py3warnings self._shell = _bytespath(shell) self._hgcommand = hgcommand or b'hg' self._usechg = usechg @@ -751,7 +990,7 @@ # If we're not in --debug mode and reference output file exists, # check test output against it. if self._debug: - return None # to match "out is None" + return None # to match "out is None" elif os.path.exists(self.refpath): with open(self.refpath, 'rb') as f: return f.read().splitlines(True) @@ -798,8 +1037,9 @@ raise if self._usechg: - self._chgsockdir = os.path.join(self._threadtmp, - b'%s.chgsock' % name) + self._chgsockdir = os.path.join( + self._threadtmp, b'%s.chgsock' % name + ) os.mkdir(self._chgsockdir) def run(self, result): @@ -882,7 +1122,7 @@ self._skipped = False if ret == self.SKIPPED_STATUS: - if out is None: # Debug mode, nothing to parse. + if out is None: # Debug mode, nothing to parse. missing = ['unknown'] failed = None else: @@ -902,8 +1142,11 @@ self.fail('no result code from test') elif out != self._refout: # Diff generation may rely on written .err file. 
- if (ret != 0 or out != self._refout) and not self._skipped \ - and not self._debug: + if ( + (ret != 0 or out != self._refout) + and not self._skipped + and not self._debug + ): with open(self.errpath, 'wb') as f: for line in out: f.write(line) @@ -933,7 +1176,11 @@ self._daemonpids = [] if self._keeptmpdir: - log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' % - (self._testtmp.decode('utf-8'), - self._threadtmp.decode('utf-8'))) + log( + '\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' + % ( + self._testtmp.decode('utf-8'), + self._threadtmp.decode('utf-8'), + ) + ) else: @@ -939,5 +1186,11 @@ else: - shutil.rmtree(self._testtmp, True) + try: + shutil.rmtree(self._testtmp) + except OSError: + # unreadable directory may be left in $TESTTMP; fix permission + # and try again + makecleanable(self._testtmp) + shutil.rmtree(self._testtmp, True) shutil.rmtree(self._threadtmp, True) if self._usechg: @@ -945,8 +1198,12 @@ # files are deleted shutil.rmtree(self._chgsockdir, True) - if (self._ret != 0 or self._out != self._refout) and not self._skipped \ - and not self._debug and self._out: + if ( + (self._ret != 0 or self._out != self._refout) + and not self._skipped + and not self._debug + and self._out + ): with open(self.errpath, 'wb') as f: for line in self._out: f.write(line) @@ -979,7 +1236,7 @@ self._portmap(2), (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'), (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'), - ] + ] r.append((self._escapepath(self._testtmp), b'$TESTTMP')) replacementfile = os.path.join(self._testdir, b'common-pattern.py') @@ -990,8 +1247,13 @@ # the intermediate 'compile' step help with debugging code = compile(source.read(), replacementfile, 'exec') exec(code, data) - r.extend(data.get('substitutions', ())) + for value in data.get('substitutions', ()): + if len(value) != 2: + msg = 'malformatted substitution in %s: %r' + msg %= (replacementfile, value) + raise ValueError(msg) + r.append(value) return r def _escapepath(self, p): if os.name == 'nt': @@ -994,11 +1256,16 @@ return r def _escapepath(self, p): if os.name == 'nt': - return ( - (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or - c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c - for c in p)) + return b''.join( + c.isalpha() + and b'[%s%s]' % (c.lower(), c.upper()) + or c in b'/\\' + and br'[/\\]' + or c.isdigit() + and c + or b'\\' + c + for c in [p[i : i + 1] for i in range(len(p))] ) else: return re.escape(p) @@ -1014,7 +1281,7 @@ environment.""" # Put the restoreenv script inside self._threadtmp scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh') - testenv['HGTEST_RESTOREENV'] = scriptpath + testenv['HGTEST_RESTOREENV'] = _strpath(scriptpath) # Only restore environment variable names that the shell allows # us to export. 
@@ -1040,6 +1307,7 @@ def _getenv(self): """Obtain environment variables to use during test execution.""" + def defineport(i): offset = '' if i == 0 else '%s' % i env["HGPORT%s" % offset] = '%s' % (self._startport + i) @@ -1043,6 +1311,7 @@ def defineport(i): offset = '' if i == 0 else '%s' % i env["HGPORT%s" % offset] = '%s' % (self._startport + i) + env = os.environ.copy() env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or '' env['HGEMITWARNINGS'] = '1' @@ -1046,9 +1315,10 @@ env = os.environ.copy() env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or '' env['HGEMITWARNINGS'] = '1' - env['TESTTMP'] = self._testtmp - env['HOME'] = self._testtmp + env['TESTTMP'] = _strpath(self._testtmp) + env['TESTNAME'] = self.name + env['HOME'] = _strpath(self._testtmp) # This number should match portneeded in _getport for port in xrange(3): # This list should be parallel to _portmap in _getreplacements defineport(port) @@ -1051,12 +1321,14 @@ # This number should match portneeded in _getport for port in xrange(3): # This list should be parallel to _portmap in _getreplacements defineport(port) - env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc') - env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids') - env["HGEDITOR"] = ('"' + sys.executable + '"' - + ' -c "import sys; sys.exit(0)"') - env["HGMERGE"] = "internal:merge" - env["HGUSER"] = "test" + env["HGRCPATH"] = _strpath(os.path.join(self._threadtmp, b'.hgrc')) + env["DAEMON_PIDS"] = _strpath( + os.path.join(self._threadtmp, b'daemon.pids') + ) + env["HGEDITOR"] = ( + '"' + sysexecutable + '"' + ' -c "import sys; sys.exit(0)"' + ) + env["HGUSER"] = "test" env["HGENCODING"] = "ascii" env["HGENCODINGMODE"] = "strict" @@ -1061,3 +1333,4 @@ env["HGENCODING"] = "ascii" env["HGENCODINGMODE"] = "strict" + env["HGHOSTNAME"] = "test-hostname" env['HGIPV6'] = str(int(self._useipv6)) @@ -1063,4 +1336,22 @@ env['HGIPV6'] = str(int(self._useipv6)) + # See contrib/catapipe.py for how to use this functionality. + if 'HGTESTCATAPULTSERVERPIPE' not in env: + # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the + # non-test one in as a default, otherwise set to devnull + env['HGTESTCATAPULTSERVERPIPE'] = env.get( + 'HGCATAPULTSERVERPIPE', os.devnull + ) + + extraextensions = [] + for opt in self._extraconfigopts: + section, key = opt.encode('utf-8').split(b'.', 1) + if section != 'extensions': + continue + name = key.split(b'=', 1)[0] + extraextensions.append(name) + + if extraextensions: + env['HGTESTEXTRAEXTENSIONS'] = b' '.join(extraextensions) # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw # IP addresses. @@ -1064,7 +1355,27 @@ # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw # IP addresses. - env['LOCALIP'] = self._localip() + env['LOCALIP'] = _strpath(self._localip()) + + # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c, + # but this is needed for testing python instances like dummyssh, + # dummysmtpd.py, and dumbhttp.py. + if PYTHON3 and os.name == 'nt': + env['PYTHONLEGACYWINDOWSSTDIO'] = '1' + + # Modified HOME in test environment can confuse Rust tools. So set + # CARGO_HOME and RUSTUP_HOME automatically if a Rust toolchain is + # present and these variables aren't already defined. 
+ cargo_home_path = os.path.expanduser('~/.cargo') + rustup_home_path = os.path.expanduser('~/.rustup') + + if os.path.exists(cargo_home_path) and b'CARGO_HOME' not in osenvironb: + env['CARGO_HOME'] = cargo_home_path + if ( + os.path.exists(rustup_home_path) + and b'RUSTUP_HOME' not in osenvironb + ): + env['RUSTUP_HOME'] = rustup_home_path # Reset some environment variables to well-known values so that # the tests produce repeatable output. @@ -1074,10 +1385,25 @@ env['COLUMNS'] = '80' env['TERM'] = 'xterm' - for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' + - 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' + - 'NO_PROXY CHGDEBUG').split(): + dropped = [ + 'CDPATH', + 'CHGDEBUG', + 'EDITOR', + 'GREP_OPTIONS', + 'HG', + 'HGMERGE', + 'HGPLAIN', + 'HGPLAINEXCEPT', + 'HGPROF', + 'http_proxy', + 'no_proxy', + 'NO_PROXY', + 'PAGER', + 'VISUAL', + ] + + for k in dropped: if k in env: del env[k] # unset env related to hooks @@ -1080,8 +1406,8 @@ if k in env: del env[k] # unset env related to hooks - for k in env.keys(): + for k in list(env): if k.startswith('HG_'): del env[k] @@ -1096,6 +1422,7 @@ hgrc.write(b'[ui]\n') hgrc.write(b'slash = True\n') hgrc.write(b'interactive = False\n') + hgrc.write(b'merge = internal:merge\n') hgrc.write(b'mergemarkers = detailed\n') hgrc.write(b'promptecho = True\n') hgrc.write(b'[defaults]\n') @@ -1103,6 +1430,8 @@ hgrc.write(b'all-warnings = true\n') hgrc.write(b'default-date = 0 0\n') hgrc.write(b'[largefiles]\n') - hgrc.write(b'usercache = %s\n' % - (os.path.join(self._testtmp, b'.cache/largefiles'))) + hgrc.write( + b'usercache = %s\n' + % (os.path.join(self._testtmp, b'.cache/largefiles')) + ) hgrc.write(b'[lfs]\n') @@ -1108,6 +1437,8 @@ hgrc.write(b'[lfs]\n') - hgrc.write(b'usercache = %s\n' % - (os.path.join(self._testtmp, b'.cache/lfs'))) + hgrc.write( + b'usercache = %s\n' + % (os.path.join(self._testtmp, b'.cache/lfs')) + ) hgrc.write(b'[web]\n') hgrc.write(b'address = localhost\n') hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii')) @@ -1111,6 +1442,7 @@ hgrc.write(b'[web]\n') hgrc.write(b'address = localhost\n') hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii')) + hgrc.write(b'server-header = testing stub value\n') for opt in self._extraconfigopts: section, key = opt.encode('utf-8').split(b'.', 1) @@ -1114,8 +1446,9 @@ for opt in self._extraconfigopts: section, key = opt.encode('utf-8').split(b'.', 1) - assert b'=' in key, ('extra config opt %s must ' - 'have an = for assignment' % opt) + assert b'=' in key, ( + 'extra config opt %s must ' 'have an = for assignment' % opt + ) hgrc.write(b'[%s]\n%s\n' % (section, key)) def fail(self, msg): @@ -1130,9 +1463,10 @@ Return a tuple (exitcode, output). output is None in debug mode. 
""" if self._debug: - proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp, - env=env) + proc = subprocess.Popen( + _strpath(cmd), shell=True, cwd=_strpath(self._testtmp), env=env + ) ret = proc.wait() return (ret, None) proc = Popen4(cmd, self._testtmp, self._timeout, env) @@ -1135,7 +1469,8 @@ ret = proc.wait() return (ret, None) proc = Popen4(cmd, self._testtmp, self._timeout, env) + def cleanup(): terminate(proc) ret = proc.wait() @@ -1144,7 +1479,6 @@ killdaemons(env['DAEMON_PIDS']) return ret - output = '' proc.tochild.close() try: @@ -1168,7 +1502,7 @@ output = re.sub(s, r, output) if normalizenewlines: - output = output.replace('\r\n', '\n') + output = output.replace(b'\r\n', b'\n') return ret, output.splitlines(True) @@ -1172,6 +1506,7 @@ return ret, output.splitlines(True) + class PythonTest(Test): """A Python-based test.""" @@ -1180,7 +1515,8 @@ return os.path.join(self._testdir, b'%s.out' % self.bname) def _run(self, env): - py3kswitch = self._py3kwarnings and b' -3' or b'' - cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path) - vlog("# Running", cmd) + py3switch = self._py3warnings and b' -3' or b'' + # Quote the python(3) executable for Windows + cmd = b'"%s"%s "%s"' % (PYTHON, py3switch, self.path) + vlog("# Running", cmd.decode("utf-8")) normalizenewlines = os.name == 'nt' @@ -1186,8 +1522,7 @@ normalizenewlines = os.name == 'nt' - result = self._runcommand(cmd, env, - normalizenewlines=normalizenewlines) + result = self._runcommand(cmd, env, normalizenewlines=normalizenewlines) if self._aborted: raise KeyboardInterrupt() return result @@ -1189,8 +1524,9 @@ if self._aborted: raise KeyboardInterrupt() return result + # Some glob patterns apply only in some circumstances, so the script # might want to remove (glob) annotations that otherwise should be # retained. @@ -1209,6 +1545,17 @@ if PYTHON3: bchr = lambda x: bytes([x]) +WARN_UNDEFINED = 1 +WARN_YES = 2 +WARN_NO = 3 + +MARK_OPTIONAL = b" (?)\n" + + +def isoptional(line): + return line.endswith(MARK_OPTIONAL) + + class TTest(Test): """A "t test" is a test backed by a .t file.""" @@ -1222,5 +1569,5 @@ def __init__(self, path, *args, **kwds): # accept an extra "case" parameter - case = kwds.pop('case', None) + case = kwds.pop('case', []) self._case = case @@ -1226,4 +1573,4 @@ self._case = case - self._allcases = parsettestcases(path) + self._allcases = {x for y in parsettestcases(path) for x in y} super(TTest, self).__init__(path, *args, **kwds) if case: @@ -1228,8 +1575,10 @@ super(TTest, self).__init__(path, *args, **kwds) if case: - self.name = '%s (case %s)' % (self.name, _strpath(case)) - self.errpath = b'%s.%s.err' % (self.errpath[:-4], case) - self._tmpname += b'-%s' % case + casepath = b'#'.join(case) + self.name = '%s#%s' % (self.name, _strpath(casepath)) + self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath) + self._tmpname += b'-%s' % casepath + self._have = {} @property def refpath(self): @@ -1254,7 +1603,7 @@ f.write(l) cmd = b'%s "%s"' % (self._shell, fname) - vlog("# Running", cmd) + vlog("# Running", cmd.decode("utf-8")) exitcode, output = self._runcommand(cmd, env) @@ -1269,6 +1618,13 @@ return self._processoutput(exitcode, output, salt, after, expected) def _hghave(self, reqs): + allreqs = b' '.join(reqs) + + self._detectslow(reqs) + + if allreqs in self._have: + return self._have.get(allreqs) + # TODO do something smarter when all other uses of hghave are gone. 
runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__))) tdir = runtestdir.replace(b'\\', b'/') @@ -1272,9 +1628,12 @@ # TODO do something smarter when all other uses of hghave are gone. runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__))) tdir = runtestdir.replace(b'\\', b'/') - proc = Popen4(b'%s -c "%s/hghave %s"' % - (self._shell, tdir, b' '.join(reqs)), - self._testtmp, 0, self._getenv()) + proc = Popen4( + b'%s -c "%s/hghave %s"' % (self._shell, tdir, allreqs), + self._testtmp, + 0, + self._getenv(), + ) stdout, stderr = proc.communicate() ret = proc.wait() if wifexited(ret): @@ -1284,5 +1643,6 @@ sys.exit(1) if ret != 0: + self._have[allreqs] = (False, stdout) return False, stdout @@ -1287,4 +1647,9 @@ return False, stdout + self._have[allreqs] = (True, None) + return True, None + + def _detectslow(self, reqs): + """update the timeout of slow test when appropriate""" if b'slow' in reqs: self._timeout = self._slowtimeout @@ -1289,9 +1654,8 @@ if b'slow' in reqs: self._timeout = self._slowtimeout - return True, None def _iftest(self, args): # implements "#if" reqs = [] for arg in args: if arg.startswith(b'no-') and arg[3:] in self._allcases: @@ -1292,9 +1656,9 @@ def _iftest(self, args): # implements "#if" reqs = [] for arg in args: if arg.startswith(b'no-') and arg[3:] in self._allcases: - if arg[3:] == self._case: + if arg[3:] in self._case: return False elif arg in self._allcases: @@ -1299,6 +1663,6 @@ return False elif arg in self._allcases: - if arg != self._case: + if arg not in self._case: return False else: reqs.append(arg) @@ -1302,6 +1666,7 @@ return False else: reqs.append(arg) + self._detectslow(reqs) return self._hghave(reqs)[0] def _parsetest(self, lines): @@ -1309,9 +1674,10 @@ # up script results with our source. These markers include input # line number and the last return code. salt = b"SALT%d" % time.time() + def addsalt(line, inpython): if inpython: script.append(b'%s %d 0\n' % (salt, line)) else: script.append(b'echo %s %d $?\n' % (salt, line)) @@ -1312,9 +1678,40 @@ def addsalt(line, inpython): if inpython: script.append(b'%s %d 0\n' % (salt, line)) else: script.append(b'echo %s %d $?\n' % (salt, line)) + activetrace = [] + session = str(uuid.uuid4()) + if PYTHON3: + session = session.encode('ascii') + hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE') or os.getenv( + 'HGCATAPULTSERVERPIPE' + ) + + def toggletrace(cmd=None): + if not hgcatapult or hgcatapult == os.devnull: + return + + if activetrace: + script.append( + b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' + % (session, activetrace[0]) + ) + if cmd is None: + return + + if isinstance(cmd, str): + quoted = shellquote(cmd.strip()) + else: + quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8') + quoted = quoted.replace(b'\\', b'\\\\') + script.append( + b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' + % (session, quoted) + ) + activetrace[0:] = [quoted] + script = [] # After we run the shell script, we re-unify the script output @@ -1341,4 +1738,34 @@ script.append(b'alias hg="%s"\n' % self._hgcommand) if os.getenv('MSYSTEM'): script.append(b'alias pwd="pwd -W"\n') + + if hgcatapult and hgcatapult != os.devnull: + if PYTHON3: + hgcatapult = hgcatapult.encode('utf8') + cataname = self.name.encode('utf8') + else: + cataname = self.name + + # Kludge: use a while loop to keep the pipe from getting + # closed by our echo commands. The still-running file gets + # reaped at the end of the script, which causes the while + # loop to exit and closes the pipe. Sigh. 
+ script.append( + b'rtendtracing() {\n' + b' echo END %(session)s %(name)s >> %(catapult)s\n' + b' rm -f "$TESTTMP/.still-running"\n' + b'}\n' + b'trap "rtendtracing" 0\n' + b'touch "$TESTTMP/.still-running"\n' + b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done ' + b'> %(catapult)s &\n' + b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n' + b'echo START %(session)s %(name)s >> %(catapult)s\n' + % { + b'name': cataname, + b'session': session, + b'catapult': hgcatapult, + } + ) + if self._case: @@ -1344,2 +1771,3 @@ if self._case: + casestr = b'#'.join(self._case) if isinstance(self._case, str): @@ -1345,3 +1773,3 @@ if isinstance(self._case, str): - quoted = shellquote(self._case) + quoted = shellquote(casestr) else: @@ -1347,5 +1775,5 @@ else: - quoted = shellquote(self._case.decode('utf8')).encode('utf8') + quoted = shellquote(casestr.decode('utf8')).encode('utf8') script.append(b'TESTCASE=%s\n' % quoted) script.append(b'export TESTCASE\n') @@ -1356,12 +1784,15 @@ if l.startswith(b'#require'): lsplit = l.split() if len(lsplit) < 2 or lsplit[0] != b'#require': - after.setdefault(pos, []).append(' !!! invalid #require\n') - haveresult, message = self._hghave(lsplit[1:]) - if not haveresult: - script = [b'echo "%s"\nexit 80\n' % message] - break + after.setdefault(pos, []).append( + b' !!! invalid #require\n' + ) + if not skipping: + haveresult, message = self._hghave(lsplit[1:]) + if not haveresult: + script = [b'echo "%s"\nexit 80\n' % message] + break after.setdefault(pos, []).append(l) elif l.startswith(b'#if'): lsplit = l.split() if len(lsplit) < 2 or lsplit[0] != b'#if': @@ -1364,6 +1795,6 @@ after.setdefault(pos, []).append(l) elif l.startswith(b'#if'): lsplit = l.split() if len(lsplit) < 2 or lsplit[0] != b'#if': - after.setdefault(pos, []).append(' !!! invalid #if\n') + after.setdefault(pos, []).append(b' !!! invalid #if\n') if skipping is not None: @@ -1369,6 +1800,6 @@ if skipping is not None: - after.setdefault(pos, []).append(' !!! nested #if\n') + after.setdefault(pos, []).append(b' !!! nested #if\n') skipping = not self._iftest(lsplit[1:]) after.setdefault(pos, []).append(l) elif l.startswith(b'#else'): if skipping is None: @@ -1371,9 +1802,9 @@ skipping = not self._iftest(lsplit[1:]) after.setdefault(pos, []).append(l) elif l.startswith(b'#else'): if skipping is None: - after.setdefault(pos, []).append(' !!! missing #if\n') + after.setdefault(pos, []).append(b' !!! missing #if\n') skipping = not skipping after.setdefault(pos, []).append(l) elif l.startswith(b'#endif'): if skipping is None: @@ -1376,9 +1807,9 @@ skipping = not skipping after.setdefault(pos, []).append(l) elif l.startswith(b'#endif'): if skipping is None: - after.setdefault(pos, []).append(' !!! missing #if\n') + after.setdefault(pos, []).append(b' !!! missing #if\n') skipping = None after.setdefault(pos, []).append(l) elif skipping: after.setdefault(pos, []).append(l) @@ -1381,11 +1812,11 @@ skipping = None after.setdefault(pos, []).append(l) elif skipping: after.setdefault(pos, []).append(l) - elif l.startswith(b' >>> '): # python inlines + elif l.startswith(b' >>> '): # python inlines after.setdefault(pos, []).append(l) prepos = pos pos = n if not inpython: # We've just entered a Python block. Add the header. inpython = True @@ -1386,10 +1817,10 @@ after.setdefault(pos, []).append(l) prepos = pos pos = n if not inpython: # We've just entered a Python block. Add the header. inpython = True - addsalt(prepos, False) # Make sure we report the exit code. 
- script.append(b'%s -m heredoctest <<EOF\n' % PYTHON) + addsalt(prepos, False) # Make sure we report the exit code. + script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON) addsalt(n, True) script.append(l[2:]) @@ -1394,5 +1825,5 @@ addsalt(n, True) script.append(l[2:]) - elif l.startswith(b' ... '): # python inlines + elif l.startswith(b' ... '): # python inlines after.setdefault(prepos, []).append(l) script.append(l[2:]) @@ -1397,6 +1828,6 @@ after.setdefault(prepos, []).append(l) script.append(l[2:]) - elif l.startswith(b' $ '): # commands + elif l.startswith(b' $ '): # commands if inpython: script.append(b'EOF\n') inpython = False @@ -1404,5 +1835,7 @@ prepos = pos pos = n addsalt(n, False) - cmd = l[4:].split() + rawcmd = l[4:] + cmd = rawcmd.split() + toggletrace(rawcmd) if len(cmd) == 2 and cmd[0] == b'cd': @@ -1408,6 +1841,6 @@ if len(cmd) == 2 and cmd[0] == b'cd': - l = b' $ cd %s || exit 1\n' % cmd[1] - script.append(l[4:]) - elif l.startswith(b' > '): # continuations + rawcmd = b'cd %s || exit 1\n' % cmd[1] + script.append(rawcmd) + elif l.startswith(b' > '): # continuations after.setdefault(prepos, []).append(l) script.append(l[4:]) @@ -1412,6 +1845,6 @@ after.setdefault(prepos, []).append(l) script.append(l[4:]) - elif l.startswith(b' '): # results + elif l.startswith(b' '): # results # Queue up a list of expected results. expected.setdefault(pos, []).append(l[2:]) else: @@ -1424,5 +1857,5 @@ if inpython: script.append(b'EOF\n') if skipping is not None: - after.setdefault(pos, []).append(' !!! missing #endif\n') + after.setdefault(pos, []).append(b' !!! missing #endif\n') addsalt(n + 1, False) @@ -1428,6 +1861,8 @@ addsalt(n + 1, False) - + # Need to end any current per-command trace + if activetrace: + toggletrace() return salt, script, after, expected def _processoutput(self, exitcode, output, salt, after, expected): # Merge the script output back into a unified test. @@ -1430,6 +1865,6 @@ return salt, script, after, expected def _processoutput(self, exitcode, output, salt, after, expected): # Merge the script output back into a unified test. - warnonly = 1 # 1: not yet; 2: yes; 3: for sure not + warnonly = WARN_UNDEFINED # 1: not yet; 2: yes; 3: for sure not if exitcode != 0: @@ -1435,5 +1870,5 @@ if exitcode != 0: - warnonly = 3 + warnonly = WARN_NO pos = -1 postout = [] @@ -1437,100 +1872,16 @@ pos = -1 postout = [] - for l in output: - lout, lcmd = l, None - if salt in l: - lout, lcmd = l.split(salt, 1) - - while lout: - if not lout.endswith(b'\n'): - lout += b' (no-eol)\n' - - # Find the expected output at the current position. - els = [None] - if expected.get(pos, None): - els = expected[pos] - - i = 0 - optional = [] - while i < len(els): - el = els[i] - - r = self.linematch(el, lout) - if isinstance(r, str): - if r == '-glob': - lout = ''.join(el.rsplit(' (glob)', 1)) - r = '' # Warn only this line. 
- elif r == "retry": - postout.append(b' ' + el) - els.pop(i) - break - else: - log('\ninfo, unknown linematch result: %r\n' % r) - r = False - if r: - els.pop(i) - break - if el: - if el.endswith(b" (?)\n"): - optional.append(i) - else: - m = optline.match(el) - if m: - conditions = [ - c for c in m.group(2).split(b' ')] - - if not self._iftest(conditions): - optional.append(i) - - i += 1 - - if r: - if r == "retry": - continue - # clean up any optional leftovers - for i in optional: - postout.append(b' ' + els[i]) - for i in reversed(optional): - del els[i] - postout.append(b' ' + el) - else: - if self.NEEDESCAPE(lout): - lout = TTest._stringescape(b'%s (esc)\n' % - lout.rstrip(b'\n')) - postout.append(b' ' + lout) # Let diff deal with it. - if r != '': # If line failed. - warnonly = 3 # for sure not - elif warnonly == 1: # Is "not yet" and line is warn only. - warnonly = 2 # Yes do warn. - break - else: - # clean up any optional leftovers - while expected.get(pos, None): - el = expected[pos].pop(0) - if el: - if not el.endswith(b" (?)\n"): - m = optline.match(el) - if m: - conditions = [c for c in m.group(2).split(b' ')] - - if self._iftest(conditions): - # Don't append as optional line - continue - else: - continue - postout.append(b' ' + el) - - if lcmd: - # Add on last return code. - ret = int(lcmd.split()[1]) - if ret != 0: - postout.append(b' [%d]\n' % ret) - if pos in after: - # Merge in non-active test bits. - postout += after.pop(pos) - pos = int(lcmd.split()[0]) + for out_rawline in output: + out_line, cmd_line = out_rawline, None + if salt in out_rawline: + out_line, cmd_line = out_rawline.split(salt, 1) + + pos, postout, warnonly = self._process_out_line( + out_line, pos, postout, expected, warnonly + ) + pos, postout = self._process_cmd_line(cmd_line, pos, postout, after) if pos in after: postout += after.pop(pos) @@ -1533,9 +1884,9 @@ if pos in after: postout += after.pop(pos) - if warnonly == 2: - exitcode = False # Set exitcode to warned. + if warnonly == WARN_YES: + exitcode = False # Set exitcode to warned. return exitcode, postout @@ -1539,6 +1890,100 @@ return exitcode, postout + def _process_out_line(self, out_line, pos, postout, expected, warnonly): + while out_line: + if not out_line.endswith(b'\n'): + out_line += b' (no-eol)\n' + + # Find the expected output at the current position. + els = [None] + if expected.get(pos, None): + els = expected[pos] + + optional = [] + for i, el in enumerate(els): + r = False + if el: + r, exact = self.linematch(el, out_line) + if isinstance(r, str): + if r == '-glob': + out_line = ''.join(el.rsplit(' (glob)', 1)) + r = '' # Warn only this line. + elif r == "retry": + postout.append(b' ' + el) + else: + log('\ninfo, unknown linematch result: %r\n' % r) + r = False + if r: + els.pop(i) + break + if el: + if isoptional(el): + optional.append(i) + else: + m = optline.match(el) + if m: + conditions = [c for c in m.group(2).split(b' ')] + + if not self._iftest(conditions): + optional.append(i) + if exact: + # Don't allow line to be matches against a later + # line in the output + els.pop(i) + break + + if r: + if r == "retry": + continue + # clean up any optional leftovers + for i in optional: + postout.append(b' ' + els[i]) + for i in reversed(optional): + del els[i] + postout.append(b' ' + el) + else: + if self.NEEDESCAPE(out_line): + out_line = TTest._stringescape( + b'%s (esc)\n' % out_line.rstrip(b'\n') + ) + postout.append(b' ' + out_line) # Let diff deal with it. + if r != '': # If line failed. 
+ warnonly = WARN_NO + elif warnonly == WARN_UNDEFINED: + warnonly = WARN_YES + break + else: + # clean up any optional leftovers + while expected.get(pos, None): + el = expected[pos].pop(0) + if el: + if not isoptional(el): + m = optline.match(el) + if m: + conditions = [c for c in m.group(2).split(b' ')] + + if self._iftest(conditions): + # Don't append as optional line + continue + else: + continue + postout.append(b' ' + el) + return pos, postout, warnonly + + def _process_cmd_line(self, cmd_line, pos, postout, after): + """process a "command" part of a line from unified test output""" + if cmd_line: + # Add on last return code. + ret = int(cmd_line.split()[1]) + if ret != 0: + postout.append(b' [%d]\n' % ret) + if pos in after: + # Merge in non-active test bits. + postout += after.pop(pos) + pos = int(cmd_line.split()[0]) + return pos, postout + @staticmethod def rematch(el, l): try: @@ -1542,7 +1987,11 @@ @staticmethod def rematch(el, l): try: - el = b'(?:' + el + b')' + # parse any flags at the beginning of the regex. Only 'i' is + # supported right now, but this should be easy to extend. + flags, el = re.match(br'^(\(\?i\))?(.*)', el).groups()[0:2] + flags = flags or b'' + el = flags + b'(?:' + el + b')' # use \Z to ensure that the regex matches to the end of the string if os.name == 'nt': return re.match(el + br'\r?\n\Z', l) @@ -1567,5 +2016,5 @@ i, n = 0, len(el) res = b'' while i < n: - c = el[i:i + 1] + c = el[i : i + 1] i += 1 @@ -1571,6 +2020,6 @@ i += 1 - if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/': - res += el[i - 1:i + 1] + if c == b'\\' and i < n and el[i : i + 1] in b'*?\\/': + res += el[i - 1 : i + 1] i += 1 elif c == b'*': res += b'.*' @@ -1583,4 +2032,6 @@ return TTest.rematch(res, l) def linematch(self, el, l): + if el == l: # perfect match (fast) + return True, True retry = False @@ -1586,8 +2037,19 @@ retry = False - if el == l: # perfect match (fast) - return True - if el: - if el.endswith(b" (?)\n"): - retry = "retry" - el = el[:-5] + b"\n" + if isoptional(el): + retry = "retry" + el = el[: -len(MARK_OPTIONAL)] + b"\n" + else: + m = optline.match(el) + if m: + conditions = [c for c in m.group(2).split(b' ')] + + el = m.group(1) + b"\n" + if not self._iftest(conditions): + # listed feature missing, should not match + return "retry", False + + if el.endswith(b" (esc)\n"): + if PYTHON3: + el = el[:-7].decode('unicode_escape') + '\n' + el = el.encode('utf-8') else: @@ -1593,32 +2055,19 @@ else: - m = optline.match(el) - if m: - conditions = [c for c in m.group(2).split(b' ')] - - el = m.group(1) + b"\n" - if not self._iftest(conditions): - retry = "retry" # Not required by listed features - - if el.endswith(b" (esc)\n"): - if PYTHON3: - el = el[:-7].decode('unicode_escape') + '\n' - el = el.encode('utf-8') - else: - el = el[:-7].decode('string-escape') + '\n' - if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l: - return True - if el.endswith(b" (re)\n"): - return TTest.rematch(el[:-6], l) or retry - if el.endswith(b" (glob)\n"): - # ignore '(glob)' added to l by 'replacements' - if l.endswith(b" (glob)\n"): - l = l[:-8] + b"\n" - return TTest.globmatch(el[:-8], l) or retry - if os.altsep: - _l = l.replace(b'\\', b'/') - if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l: - return True - return retry + el = el[:-7].decode('string-escape') + '\n' + if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l: + return True, True + if el.endswith(b" (re)\n"): + return (TTest.rematch(el[:-6], l) or retry), False + if el.endswith(b" (glob)\n"): + # 
ignore '(glob)' added to l by 'replacements' + if l.endswith(b" (glob)\n"): + l = l[:-8] + b"\n" + return (TTest.globmatch(el[:-8], l) or retry), False + if os.altsep: + _l = l.replace(b'\\', b'/') + if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l: + return True, True + return retry, True @staticmethod def parsehghaveoutput(lines): @@ -1632,6 +2081,8 @@ for line in lines: if line.startswith(TTest.SKIPPED_PREFIX): line = line.splitlines()[0] - missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8')) + missing.append( + line[len(TTest.SKIPPED_PREFIX) :].decode('utf-8') + ) elif line.startswith(TTest.FAILED_PREFIX): line = line.splitlines()[0] @@ -1636,6 +2087,6 @@ elif line.startswith(TTest.FAILED_PREFIX): line = line.splitlines()[0] - failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8')) + failed.append(line[len(TTest.FAILED_PREFIX) :].decode('utf-8')) return missing, failed @@ -1647,7 +2098,8 @@ def _stringescape(s): return TTest.ESCAPESUB(TTest._escapef, s) + iolock = threading.RLock() firstlock = threading.RLock() firsterror = False @@ -1650,6 +2102,7 @@ iolock = threading.RLock() firstlock = threading.RLock() firsterror = False + class TestResult(unittest._TextTestResult): """Holds results when executing via unittest.""" @@ -1654,5 +2107,6 @@ class TestResult(unittest._TextTestResult): """Holds results when executing via unittest.""" + # Don't worry too much about accessing the non-public _TextTestResult. # It is relatively common in Python testing tools. def __init__(self, options, *args, **kwargs): @@ -1679,6 +2133,6 @@ self.color = pygmentspresent and self.stream.isatty() elif options.color == 'never': self.color = False - else: # 'always', for testing purposes + else: # 'always', for testing purposes self.color = pygmentspresent @@ -1683,5 +2137,13 @@ self.color = pygmentspresent + def onStart(self, test): + """ Can be overriden by custom TestResult + """ + + def onEnd(self): + """ Can be overriden by custom TestResult + """ + def addFailure(self, test, reason): self.failures.append((test, reason)) @@ -1749,8 +2211,9 @@ pass elif self._options.view: v = self._options.view - if PYTHON3: - v = _bytespath(v) - os.system(b"%s %s %s" % - (v, test.refpath, test.errpath)) + subprocess.call( + r'"%s" "%s" "%s"' + % (v, _strpath(test.refpath), _strpath(test.errpath)), + shell=True, + ) else: @@ -1756,5 +2219,17 @@ else: - servefail, lines = getdiff(expected, got, - test.refpath, test.errpath) + servefail, lines = getdiff( + expected, got, test.refpath, test.errpath + ) + self.stream.write('\n') + for line in lines: + line = highlightdiff(line, self.color) + if PYTHON3: + self.stream.flush() + self.stream.buffer.write(line) + self.stream.buffer.flush() + else: + self.stream.write(line) + self.stream.flush() + if servefail: raise test.failureException( @@ -1759,20 +2234,10 @@ if servefail: raise test.failureException( - 'server failed to start (HGPORT=%s)' % test._startport) - else: - self.stream.write('\n') - for line in lines: - line = highlightdiff(line, self.color) - if PYTHON3: - self.stream.flush() - self.stream.buffer.write(line) - self.stream.buffer.flush() - else: - self.stream.write(line) - self.stream.flush() + 'server failed to start (HGPORT=%s)' % test._startport + ) # handle interactive prompt without releasing iolock if self._options.interactive: if test.readrefout() != expected: self.stream.write( 'Reference output has changed (run again to prompt ' @@ -1773,9 +2238,10 @@ # handle interactive prompt without releasing iolock if 
self._options.interactive: if test.readrefout() != expected: self.stream.write( 'Reference output has changed (run again to prompt ' - 'changes)') + 'changes)' + ) else: self.stream.write('Accept this change? [n] ') @@ -1780,5 +2246,6 @@ else: self.stream.write('Accept this change? [n] ') + self.stream.flush() answer = sys.stdin.readline().strip() if answer.lower() in ('y', 'yes'): if test.path.endswith(b'.t'): @@ -1797,11 +2264,14 @@ # os.times module computes the user time and system time spent by # child's processes along with real elapsed time taken by a process. # This module has one limitation. It can only work for Linux user - # and not for Windows. - test.started = os.times() - if self._firststarttime is None: # thread racy but irrelevant - self._firststarttime = test.started[4] + # and not for Windows. Hence why we fall back to another function + # for wall time calculations. + test.started_times = os.times() + # TODO use a monotonic clock once support for Python 2.7 is dropped. + test.started_time = time.time() + if self._firststarttime is None: # thread racy but irrelevant + self._firststarttime = test.started_time def stopTest(self, test, interrupted=False): super(TestResult, self).stopTest(test) @@ -1804,9 +2274,10 @@ def stopTest(self, test, interrupted=False): super(TestResult, self).stopTest(test) - test.stopped = os.times() - - starttime = test.started - endtime = test.stopped + test.stopped_times = os.times() + stopped_time = time.time() + + starttime = test.started_times + endtime = test.stopped_times origin = self._firststarttime @@ -1812,11 +2283,14 @@ origin = self._firststarttime - self.times.append((test.name, - endtime[2] - starttime[2], # user space CPU time - endtime[3] - starttime[3], # sys space CPU time - endtime[4] - starttime[4], # real time - starttime[4] - origin, # start date in run context - endtime[4] - origin, # end date in run context - )) + self.times.append( + ( + test.name, + endtime[2] - starttime[2], # user space CPU time + endtime[3] - starttime[3], # sys space CPU time + stopped_time - test.started_time, # real time + test.started_time - origin, # start date in run context + stopped_time - origin, # end date in run context + ) + ) if interrupted: with iolock: @@ -1820,9 +2294,23 @@ if interrupted: with iolock: - self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % ( - test.name, self.times[-1][3])) + self.stream.writeln( + 'INTERRUPTED: %s (after %d seconds)' + % (test.name, self.times[-1][3]) + ) + + +def getTestResult(): + """ + Returns the relevant test result + """ + if "CUSTOM_TEST_RESULT" in os.environ: + testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"]) + return testresultmodule.TestResult + else: + return TestResult + class TestSuite(unittest.TestSuite): """Custom unittest TestSuite that knows how to execute Mercurial tests.""" @@ -1825,11 +2313,22 @@ class TestSuite(unittest.TestSuite): """Custom unittest TestSuite that knows how to execute Mercurial tests.""" - def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None, - retest=False, keywords=None, loop=False, runs_per_test=1, - loadtest=None, showchannels=False, - *args, **kwargs): + def __init__( + self, + testdir, + jobs=1, + whitelist=None, + blacklist=None, + retest=False, + keywords=None, + loop=False, + runs_per_test=1, + loadtest=None, + showchannels=False, + *args, + **kwargs + ): """Create a new instance that can run tests with a configuration. testdir specifies the directory where tests are executed from. 
This @@ -1876,8 +2375,9 @@ tests = [] num_tests = [0] for test in self._tests: + def get(): num_tests[0] += 1 if getattr(test, 'should_reload', False): return self._loadtest(test, num_tests[0]) return test @@ -1879,8 +2379,9 @@ def get(): num_tests[0] += 1 if getattr(test, 'should_reload', False): return self._loadtest(test, num_tests[0]) return test + if not os.path.exists(test.path): result.addSkip(test, "Doesn't exist") continue @@ -1928,7 +2429,7 @@ done.put(None) except KeyboardInterrupt: pass - except: # re-raises + except: # re-raises done.put(('!', test, 'run-test raised an error, see traceback')) raise finally: @@ -1953,7 +2454,7 @@ sys.stdout.flush() for x in xrange(10): if channels: - time.sleep(.1) + time.sleep(0.1) count += 1 stoppedearly = False @@ -1978,10 +2479,9 @@ if self._loop: if getattr(test, 'should_reload', False): num_tests[0] += 1 - tests.append( - self._loadtest(test, num_tests[0])) + tests.append(self._loadtest(test, num_tests[0])) else: tests.append(test) if self._jobs == 1: job(test, result) else: @@ -1983,10 +2483,11 @@ else: tests.append(test) if self._jobs == 1: job(test, result) else: - t = threading.Thread(target=job, name=test.name, - args=(test, result)) + t = threading.Thread( + target=job, name=test.name, args=(test, result) + ) t.start() running += 1 @@ -2009,8 +2510,9 @@ return result + # Save the most recent 5 wall-clock runtimes of each test to a # human-readable text file named .testtimes. Tests are sorted # alphabetically, while times for each test are listed from oldest to # newest. @@ -2012,8 +2514,9 @@ # Save the most recent 5 wall-clock runtimes of each test to a # human-readable text file named .testtimes. Tests are sorted # alphabetically, while times for each test are listed from oldest to # newest. + def loadtimes(outputdir): times = [] try: @@ -2017,5 +2520,5 @@ def loadtimes(outputdir): times = [] try: - with open(os.path.join(outputdir, b'.testtimes-')) as fp: + with open(os.path.join(outputdir, b'.testtimes')) as fp: for line in fp: @@ -2021,8 +2524,10 @@ for line in fp: - ts = line.split() - times.append((ts[0], [float(t) for t in ts[1:]])) + m = re.match('(.*?) ([0-9. 
]+)', line) + times.append( + (m.group(1), [float(t) for t in m.group(2).split()]) + ) except IOError as err: if err.errno != errno.ENOENT: raise return times @@ -2024,8 +2529,9 @@ except IOError as err: if err.errno != errno.ENOENT: raise return times + def savetimes(outputdir, result): saved = dict(loadtimes(outputdir)) maxruns = 5 @@ -2037,8 +2543,9 @@ ts.append(real) ts[:] = ts[-maxruns:] - fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes', - dir=outputdir, text=True) + fd, tmpname = tempfile.mkstemp( + prefix=b'.testtimes', dir=outputdir, text=True + ) with os.fdopen(fd, 'w') as fp: for name, ts in sorted(saved.items()): fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts]))) @@ -2052,6 +2559,7 @@ except OSError: pass + class TextTestRunner(unittest.TextTestRunner): """Custom unittest test runner that uses appropriate settings.""" @@ -2060,4 +2568,8 @@ self._runner = runner + self._result = getTestResult()( + self._runner.options, self.stream, self.descriptions, self.verbosity + ) + def listtests(self, test): @@ -2063,4 +2575,2 @@ def listtests(self, test): - result = TestResult(self._runner.options, self.stream, - self.descriptions, 0) test = sorted(test, key=lambda t: t.name) @@ -2066,3 +2576,6 @@ test = sorted(test, key=lambda t: t.name) + + self._result.onStart(test) + for t in test: print(t.name) @@ -2067,6 +2580,6 @@ for t in test: print(t.name) - result.addSuccess(t) + self._result.addSuccess(t) if self._runner.options.xunit: with open(self._runner.options.xunit, "wb") as xuf: @@ -2070,8 +2583,8 @@ if self._runner.options.xunit: with open(self._runner.options.xunit, "wb") as xuf: - self._writexunit(result, xuf) + self._writexunit(self._result, xuf) if self._runner.options.json: jsonpath = os.path.join(self._runner._outputdir, b'report.json') with open(jsonpath, 'w') as fp: @@ -2074,9 +2587,9 @@ if self._runner.options.json: jsonpath = os.path.join(self._runner._outputdir, b'report.json') with open(jsonpath, 'w') as fp: - self._writejson(result, fp) - - return result + self._writejson(self._result, fp) + + return self._result def run(self, test): @@ -2081,15 +2594,13 @@ def run(self, test): - result = TestResult(self._runner.options, self.stream, - self.descriptions, self.verbosity) - - test(result) - - failed = len(result.failures) - skipped = len(result.skipped) - ignored = len(result.ignored) + self._result.onStart(test) + test(self._result) + + failed = len(self._result.failures) + skipped = len(self._result.skipped) + ignored = len(self._result.ignored) with iolock: self.stream.writeln('') if not self._runner.options.noskips: @@ -2091,7 +2602,9 @@ with iolock: self.stream.writeln('') if not self._runner.options.noskips: - for test, msg in result.skipped: + for test, msg in sorted( + self._result.skipped, key=lambda s: s[0].name + ): formatted = 'Skipped %s: %s\n' % (test.name, msg) @@ -2097,4 +2610,7 @@ formatted = 'Skipped %s: %s\n' % (test.name, msg) - self.stream.write(highlightmsg(formatted, result.color)) - for test, msg in result.failures: + msg = highlightmsg(formatted, self._result.color) + self.stream.write(msg) + for test, msg in sorted( + self._result.failures, key=lambda f: f[0].name + ): formatted = 'Failed %s: %s\n' % (test.name, msg) @@ -2100,7 +2616,9 @@ formatted = 'Failed %s: %s\n' % (test.name, msg) - self.stream.write(highlightmsg(formatted, result.color)) - for test, msg in result.errors: + self.stream.write(highlightmsg(formatted, self._result.color)) + for test, msg in sorted( + self._result.errors, key=lambda e: e[0].name + ): 
self.stream.writeln('Errored %s: %s' % (test.name, msg)) if self._runner.options.xunit: with open(self._runner.options.xunit, "wb") as xuf: @@ -2103,9 +2621,9 @@ self.stream.writeln('Errored %s: %s' % (test.name, msg)) if self._runner.options.xunit: with open(self._runner.options.xunit, "wb") as xuf: - self._writexunit(result, xuf) + self._writexunit(self._result, xuf) if self._runner.options.json: jsonpath = os.path.join(self._runner._outputdir, b'report.json') with open(jsonpath, 'w') as fp: @@ -2108,8 +2626,8 @@ if self._runner.options.json: jsonpath = os.path.join(self._runner._outputdir, b'report.json') with open(jsonpath, 'w') as fp: - self._writejson(result, fp) + self._writejson(self._result, fp) self._runner._checkhglib('Tested') @@ -2113,6 +2631,6 @@ self._runner._checkhglib('Tested') - savetimes(self._runner._outputdir, result) + savetimes(self._runner._outputdir, self._result) if failed and self._runner.options.known_good_rev: @@ -2117,5 +2635,5 @@ if failed and self._runner.options.known_good_rev: - self._bisecttests(t for t, m in result.failures) + self._bisecttests(t for t, m in self._result.failures) self.stream.writeln( '# Ran %d tests, %d skipped, %d failed.' @@ -2120,4 +2638,5 @@ self.stream.writeln( '# Ran %d tests, %d skipped, %d failed.' - % (result.testsRun, skipped + ignored, failed)) + % (self._result.testsRun, skipped + ignored, failed) + ) if failed: @@ -2123,4 +2642,5 @@ if failed: - self.stream.writeln('python hash seed: %s' % - os.environ['PYTHONHASHSEED']) + self.stream.writeln( + 'python hash seed: %s' % os.environ['PYTHONHASHSEED'] + ) if self._runner.options.time: @@ -2126,5 +2646,5 @@ if self._runner.options.time: - self.printtimes(result.times) + self.printtimes(self._result.times) if self._runner.options.exceptions: exceptions = aggregateexceptions( @@ -2128,7 +2648,7 @@ if self._runner.options.exceptions: exceptions = aggregateexceptions( - os.path.join(self._runner._outputdir, b'exceptions')) - total = sum(exceptions.values()) + os.path.join(self._runner._outputdir, b'exceptions') + ) self.stream.writeln('Exceptions Report:') @@ -2133,9 +2653,25 @@ self.stream.writeln('Exceptions Report:') - self.stream.writeln('%d total from %d frames' % - (total, len(exceptions))) - for (frame, line, exc), count in exceptions.most_common(): - self.stream.writeln('%d\t%s: %s' % (count, frame, exc)) + self.stream.writeln( + '%d total from %d frames' + % (exceptions['total'], len(exceptions['exceptioncounts'])) + ) + combined = exceptions['combined'] + for key in sorted(combined, key=combined.get, reverse=True): + frame, line, exc = key + totalcount, testcount, leastcount, leasttest = combined[key] + + self.stream.writeln( + '%d (%d tests)\t%s: %s (%s - %d total)' + % ( + totalcount, + testcount, + frame, + exc, + leasttest, + leastcount, + ) + ) self.stream.flush() @@ -2139,10 +2675,10 @@ self.stream.flush() - return result + return self._result def _bisecttests(self, tests): bisectcmd = ['hg', 'bisect'] bisectrepo = self._runner.options.bisect_repo if bisectrepo: bisectcmd.extend(['-R', os.path.abspath(bisectrepo)]) @@ -2143,9 +2679,10 @@ def _bisecttests(self, tests): bisectcmd = ['hg', 'bisect'] bisectrepo = self._runner.options.bisect_repo if bisectrepo: bisectcmd.extend(['-R', os.path.abspath(bisectrepo)]) + def pread(args): env = os.environ.copy() env['HGPLAIN'] = '1' @@ -2149,8 +2686,9 @@ def pread(args): env = os.environ.copy() env['HGPLAIN'] = '1' - p = subprocess.Popen(args, stderr=subprocess.STDOUT, - stdout=subprocess.PIPE, env=env) + p = subprocess.Popen( 
+ args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env + ) data = p.stdout.read() p.wait() return data @@ -2154,6 +2692,7 @@ data = p.stdout.read() p.wait() return data + for test in tests: pread(bisectcmd + ['--reset']), pread(bisectcmd + ['--bad', '.']) @@ -2164,7 +2703,6 @@ withhg = self._runner.options.with_hg if withhg: opts += ' --with-hg=%s ' % shellquote(_strpath(withhg)) - rtc = '%s %s %s %s' % (sys.executable, sys.argv[0], opts, - test) + rtc = '%s %s %s %s' % (sysexecutable, sys.argv[0], opts, test) data = pread(bisectcmd + ['--command', rtc]) m = re.search( @@ -2169,8 +2707,12 @@ data = pread(bisectcmd + ['--command', rtc]) m = re.search( - (br'\nThe first (?P<goodbad>bad|good) revision ' - br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n' - br'summary: +(?P<summary>[^\n]+)\n'), - data, (re.MULTILINE | re.DOTALL)) + ( + br'\nThe first (?P<goodbad>bad|good) revision ' + br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n' + br'summary: +(?P<summary>[^\n]+)\n' + ), + data, + (re.MULTILINE | re.DOTALL), + ) if m is None: self.stream.writeln( @@ -2175,5 +2717,6 @@ if m is None: self.stream.writeln( - 'Failed to identify failure point for %s' % test) + 'Failed to identify failure point for %s' % test + ) continue dat = m.groupdict() @@ -2178,4 +2721,4 @@ continue dat = m.groupdict() - verb = 'broken' if dat['goodbad'] == 'bad' else 'fixed' + verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed' self.stream.writeln( @@ -2181,9 +2724,15 @@ self.stream.writeln( - '%s %s by %s (%s)' % ( - test, verb, dat['node'], dat['summary'])) + '%s %s by %s (%s)' + % ( + test, + verb, + dat['node'].decode('ascii'), + dat['summary'].decode('utf8', 'ignore'), + ) + ) def printtimes(self, times): # iolock held by run self.stream.writeln('# Producing time report') times.sort(key=lambda t: (t[3])) cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s' @@ -2184,11 +2733,13 @@ def printtimes(self, times): # iolock held by run self.stream.writeln('# Producing time report') times.sort(key=lambda t: (t[3])) cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s' - self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' % - ('start', 'end', 'cuser', 'csys', 'real', 'Test')) + self.stream.writeln( + '%-7s %-7s %-7s %-7s %-7s %s' + % ('start', 'end', 'cuser', 'csys', 'real', 'Test') + ) for tdata in times: test = tdata[0] cuser, csys, real, start, end = tdata[1:6] @@ -2200,4 +2751,6 @@ timesd = dict((t[0], t[3]) for t in result.times) doc = minidom.Document() s = doc.createElement('testsuite') + s.setAttribute('errors', "0") # TODO + s.setAttribute('failures', str(len(result.failures))) s.setAttribute('name', 'run-tests') @@ -2203,2 +2756,5 @@ s.setAttribute('name', 'run-tests') + s.setAttribute( + 'skipped', str(len(result.skipped) + len(result.ignored)) + ) s.setAttribute('tests', str(result.testsRun)) @@ -2204,8 +2760,4 @@ s.setAttribute('tests', str(result.testsRun)) - s.setAttribute('errors', "0") # TODO - s.setAttribute('failures', str(len(result.failures))) - s.setAttribute('skipped', str(len(result.skipped) + - len(result.ignored))) doc.appendChild(s) for tc in result.successes: t = doc.createElement('testcase') @@ -2255,10 +2807,11 @@ timesd[test] = tdata[1:] outcome = {} - groups = [('success', ((tc, None) - for tc in result.successes)), - ('failure', result.failures), - ('skip', result.skipped)] + groups = [ + ('success', ((tc, None) for tc in result.successes)), + ('failure', result.failures), + ('skip', result.skipped), + ] for res, testcases in groups: for tc, __ in testcases: if tc.name in timesd: @@ -2267,16 
+2820,17 @@ diff = diff.decode('unicode_escape') except UnicodeDecodeError as e: diff = '%r decoding diff, sorry' % e - tres = {'result': res, - 'time': ('%0.3f' % timesd[tc.name][2]), - 'cuser': ('%0.3f' % timesd[tc.name][0]), - 'csys': ('%0.3f' % timesd[tc.name][1]), - 'start': ('%0.3f' % timesd[tc.name][3]), - 'end': ('%0.3f' % timesd[tc.name][4]), - 'diff': diff, - } + tres = { + 'result': res, + 'time': ('%0.3f' % timesd[tc.name][2]), + 'cuser': ('%0.3f' % timesd[tc.name][0]), + 'csys': ('%0.3f' % timesd[tc.name][1]), + 'start': ('%0.3f' % timesd[tc.name][3]), + 'end': ('%0.3f' % timesd[tc.name][4]), + 'diff': diff, + } else: # blacklisted test tres = {'result': res} outcome[tc.name] = tres @@ -2278,9 +2832,10 @@ else: # blacklisted test tres = {'result': res} outcome[tc.name] = tres - jsonout = json.dumps(outcome, sort_keys=True, indent=4, - separators=(',', ': ')) + jsonout = json.dumps( + outcome, sort_keys=True, indent=4, separators=(',', ': ') + ) outf.writelines(("testreport =", jsonout)) @@ -2285,8 +2840,9 @@ outf.writelines(("testreport =", jsonout)) -def sorttests(testdescs, shuffle=False): + +def sorttests(testdescs, previoustimes, shuffle=False): """Do an in-place sort of tests.""" if shuffle: random.shuffle(testdescs) return @@ -2288,10 +2844,23 @@ """Do an in-place sort of tests.""" if shuffle: random.shuffle(testdescs) return - # keywords for slow tests - slow = {b'svn': 10, + if previoustimes: + + def sortkey(f): + f = f['path'] + if f in previoustimes: + # Use most recent time as estimate + return -(previoustimes[f][-1]) + else: + # Default to a rather arbitrary value of 1 second for new tests + return -1.0 + + else: + # keywords for slow tests + slow = { + b'svn': 10, b'cvs': 10, b'hghave': 10, b'largefiles-update': 10, @@ -2302,13 +2871,11 @@ b'check': 100, b'gendoc': 100, b'contrib-perf': 200, - } - perf = {} - - def sortkey(f): - # run largest tests first, as they tend to take the longest - f = f['path'] - try: - return perf[f] - except KeyError: + b'merge-combination': 100, + } + perf = {} + + def sortkey(f): + # run largest tests first, as they tend to take the longest + f = f['path'] try: @@ -2314,17 +2881,20 @@ try: - val = -os.stat(f).st_size - except OSError as e: - if e.errno != errno.ENOENT: - raise - perf[f] = -1e9 # file does not exist, tell early - return -1e9 - for kw, mul in slow.items(): - if kw in f: - val *= mul - if f.endswith(b'.py'): - val /= 10.0 - perf[f] = val / 1000.0 - return perf[f] + return perf[f] + except KeyError: + try: + val = -os.stat(f).st_size + except OSError as e: + if e.errno != errno.ENOENT: + raise + perf[f] = -1e9 # file does not exist, tell early + return -1e9 + for kw, mul in slow.items(): + if kw in f: + val *= mul + if f.endswith(b'.py'): + val /= 10.0 + perf[f] = val / 1000.0 + return perf[f] testdescs.sort(key=sortkey) @@ -2328,6 +2898,7 @@ testdescs.sort(key=sortkey) + class TestRunner(object): """Holds context for executing tests. 
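To make the new ordering logic concrete: with --order-by-runtime the runner feeds the per-test histories read by loadtimes() into sorttests(), which sorts on the negated most recent wall-clock time so the slowest tests start first, and assumes one second for tests it has never timed. A self-contained sketch of that path (the helper name and sample timings are invented for illustration, this is not the patch's code itself):

# Simplified model of the runtime-based sort key used by sorttests().
def order_by_runtime(testdescs, previoustimes):
    def sortkey(desc):
        path = desc['path']
        if path in previoustimes:
            # Most recent recorded time is the estimate; negate so that
            # slower tests sort first.
            return -previoustimes[path][-1]
        # Default of 1 second for tests with no recorded history.
        return -1.0

    return sorted(testdescs, key=sortkey)


# Histories as loadtimes() would return them, oldest to newest.
previoustimes = {
    b'test-merge.t': [12.0, 14.5],
    b'test-status.t': [0.8, 0.6],
}
descs = [
    {'path': b'test-status.t'},
    {'path': b'test-new.t'},
    {'path': b'test-merge.t'},
]
print([d['path'] for d in order_by_runtime(descs, previoustimes)])
# -> test-merge.t (14.5s), then test-new.t (assumed 1s), then test-status.t (0.6s)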
@@ -2384,6 +2955,7 @@ testdescs = self.findtests(tests) if options.profile_runner: import statprof + statprof.start() result = self._run(testdescs) if options.profile_runner: @@ -2395,11 +2967,9 @@ os.umask(oldmask) def _run(self, testdescs): - sorttests(testdescs, shuffle=self.options.random) - - self._testdir = osenvironb[b'TESTDIR'] = getattr( - os, 'getcwdb', os.getcwd)() + testdir = getcwdb() + self._testdir = osenvironb[b'TESTDIR'] = getcwdb() # assume all tests in same folder for now if testdescs: pathname = os.path.dirname(testdescs[0]['path']) if pathname: @@ -2402,9 +2972,9 @@ # assume all tests in same folder for now if testdescs: pathname = os.path.dirname(testdescs[0]['path']) if pathname: - osenvironb[b'TESTDIR'] = os.path.join(osenvironb[b'TESTDIR'], - pathname) + testdir = os.path.join(testdir, pathname) + self._testdir = osenvironb[b'TESTDIR'] = testdir if self.options.outputdir: self._outputdir = canonpath(_bytespath(self.options.outputdir)) else: @@ -2408,6 +2978,6 @@ if self.options.outputdir: self._outputdir = canonpath(_bytespath(self.options.outputdir)) else: - self._outputdir = self._testdir + self._outputdir = getcwdb() if testdescs and pathname: self._outputdir = os.path.join(self._outputdir, pathname) @@ -2412,5 +2982,9 @@ if testdescs and pathname: self._outputdir = os.path.join(self._outputdir, pathname) + previoustimes = {} + if self.options.order_by_runtime: + previoustimes = dict(loadtimes(self._outputdir)) + sorttests(testdescs, previoustimes, shuffle=self.options.random) if 'PYTHONHASHSEED' not in os.environ: # use a random python hash seed all the time @@ -2436,8 +3010,7 @@ d = osenvironb.get(b'TMP', None) tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d) - self._hgtmp = osenvironb[b'HGTMP'] = ( - os.path.realpath(tmpdir)) + self._hgtmp = osenvironb[b'HGTMP'] = os.path.realpath(tmpdir) if self.options.with_hg: self._installdir = None @@ -2476,6 +3049,13 @@ self._tmpbindir = self._bindir self._pythondir = os.path.join(self._installdir, b"lib", b"python") + # Force the use of hg.exe instead of relying on MSYS to recognize hg is + # a python script and feed it to python.exe. Legacy stdio is force + # enabled by hg.exe, and this is a more realistic way to launch hg + # anyway. 
+ if os.name == 'nt' and not self._hgcommand.endswith(b'.exe'): + self._hgcommand += b'.exe' + # set CHGHG, then replace "hg" command by "chg" chgbindir = self._bindir if self.options.chg or self.options.with_chg: @@ -2491,9 +3071,6 @@ osenvironb[b"BINDIR"] = self._bindir osenvironb[b"PYTHON"] = PYTHON - if self.options.with_python3: - osenvironb[b'PYTHON3'] = self.options.with_python3 - fileb = _bytespath(__file__) runtestdir = os.path.abspath(os.path.dirname(fileb)) osenvironb[b'RUNTESTDIR'] = runtestdir @@ -2555,9 +3132,10 @@ osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir logexceptions = os.path.join(self._testdir, b'logexceptions.py') self.options.extra_config_opt.append( - 'extensions.logexceptions=%s' % logexceptions.decode('utf-8')) - - vlog("# Using TESTDIR", self._testdir) - vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR']) - vlog("# Using HGTMP", self._hgtmp) + 'extensions.logexceptions=%s' % logexceptions.decode('utf-8') + ) + + vlog("# Using TESTDIR", _strpath(self._testdir)) + vlog("# Using RUNTESTDIR", _strpath(osenvironb[b'RUNTESTDIR'])) + vlog("# Using HGTMP", _strpath(self._hgtmp)) vlog("# Using PATH", os.environ["PATH"]) @@ -2563,7 +3141,9 @@ vlog("# Using PATH", os.environ["PATH"]) - vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH]) - vlog("# Writing to directory", self._outputdir) + vlog( + "# Using", _strpath(IMPL_PATH), _strpath(osenvironb[IMPL_PATH]), + ) + vlog("# Writing to directory", _strpath(self._outputdir)) try: return self._runtests(testdescs) or 0 finally: @@ -2566,8 +3146,8 @@ try: return self._runtests(testdescs) or 0 finally: - time.sleep(.1) + time.sleep(0.1) self._cleanup() def findtests(self, args): @@ -2578,8 +3158,12 @@ """ if not args: if self.options.changed: - proc = Popen4('hg st --rev "%s" -man0 .' % - self.options.changed, None, 0) + proc = Popen4( + b'hg st --rev "%s" -man0 .' 
+ % _bytespath(self.options.changed), + None, + 0, + ) stdout, stderr = proc.communicate() args = stdout.strip(b'\0').split(b'\0') else: @@ -2595,5 +3179,6 @@ expanded_args.append(arg) args = expanded_args + testcasepattern = re.compile(br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-.#]+))') tests = [] for t in args: @@ -2598,7 +3183,20 @@ tests = [] for t in args: - if not (os.path.basename(t).startswith(b'test-') - and (t.endswith(b'.py') or t.endswith(b'.t'))): - continue + case = [] + + if not ( + os.path.basename(t).startswith(b'test-') + and (t.endswith(b'.py') or t.endswith(b'.t')) + ): + + m = testcasepattern.match(os.path.basename(t)) + if m is not None: + t_basename, casestr = m.groups() + t = os.path.join(os.path.dirname(t), t_basename) + if casestr: + case = casestr.split(b'#') + else: + continue + if t.endswith(b'.t'): # .t file may contain multiple test cases @@ -2603,7 +3201,24 @@ if t.endswith(b'.t'): # .t file may contain multiple test cases - cases = sorted(parsettestcases(t)) - if cases: + casedimensions = parsettestcases(t) + if casedimensions: + cases = [] + + def addcases(case, casedimensions): + if not casedimensions: + cases.append(case) + else: + for c in casedimensions[0]: + addcases(case + [c], casedimensions[1:]) + + addcases([], casedimensions) + if case and case in cases: + cases = [case] + elif case: + # Ignore invalid cases + cases = [] + else: + pass tests += [{'path': t, 'case': c} for c in sorted(cases)] else: tests.append({'path': t}) @@ -2615,7 +3230,7 @@ def _reloadtest(test, i): # convert a test back to its description dict desc = {'path': test.path} - case = getattr(test, '_case', None) + case = getattr(test, '_case', []) if case: desc['case'] = case return self._gettest(desc, i) @@ -2627,7 +3242,8 @@ desc = testdescs[0] # desc['path'] is a relative path if 'case' in desc: - errpath = b'%s.%s.err' % (desc['path'], desc['case']) + casestr = b'#'.join(desc['case']) + errpath = b'%s#%s.err' % (desc['path'], casestr) else: errpath = b'%s.err' % desc['path'] errpath = os.path.join(self._outputdir, errpath) @@ -2639,9 +3255,12 @@ testdescs = orig tests = [self._gettest(d, i) for i, d in enumerate(testdescs)] + num_tests = len(tests) * self.options.runs_per_test + + jobs = min(num_tests, self.options.jobs) failed = False kws = self.options.keywords if kws is not None and PYTHON3: kws = kws.encode('utf-8') @@ -2642,17 +3261,20 @@ failed = False kws = self.options.keywords if kws is not None and PYTHON3: kws = kws.encode('utf-8') - suite = TestSuite(self._testdir, - jobs=self.options.jobs, - whitelist=self.options.whitelisted, - blacklist=self.options.blacklist, - retest=self.options.retest, - keywords=kws, - loop=self.options.loop, - runs_per_test=self.options.runs_per_test, - showchannels=self.options.showchannels, - tests=tests, loadtest=_reloadtest) + suite = TestSuite( + self._testdir, + jobs=jobs, + whitelist=self.options.whitelisted, + blacklist=self.options.blacklist, + retest=self.options.retest, + keywords=kws, + loop=self.options.loop, + runs_per_test=self.options.runs_per_test, + showchannels=self.options.showchannels, + tests=tests, + loadtest=_reloadtest, + ) verbosity = 1 @@ -2658,5 +3280,7 @@ verbosity = 1 - if self.options.verbose: + if self.options.list_tests: + verbosity = 0 + elif self.options.verbose: verbosity = 2 runner = TextTestRunner(self, verbosity=verbosity) @@ -2672,5 +3296,10 @@ assert self._installdir self._installchg() + log( + 'running %d tests using %d parallel processes' + % (num_tests, jobs) + ) + result = runner.run(suite) @@ -2675,5 
+3304,5 @@ result = runner.run(suite) - if result.failures: + if result.failures or result.errors: failed = True @@ -2678,5 +3307,7 @@ failed = True + result.onEnd() + if self.options.anycoverage: self._outputcoverage() except KeyboardInterrupt: @@ -2687,7 +3318,7 @@ return 1 def _getport(self, count): - port = self._ports.get(count) # do we have a cached entry? + port = self._ports.get(count) # do we have a cached entry? if port is None: portneeded = 3 # above 100 tries we just give up and let test reports failure @@ -2719,9 +3350,9 @@ testcls = cls break - refpath = os.path.join(self._testdir, path) + refpath = os.path.join(getcwdb(), path) tmpdir = os.path.join(self._hgtmp, b'child%d' % count) # extra keyword parameters. 'case' is used by .t tests kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc) @@ -2723,20 +3354,25 @@ tmpdir = os.path.join(self._hgtmp, b'child%d' % count) # extra keyword parameters. 'case' is used by .t tests kwds = dict((k, testdesc[k]) for k in ['case'] if k in testdesc) - t = testcls(refpath, self._outputdir, tmpdir, - keeptmpdir=self.options.keep_tmpdir, - debug=self.options.debug, - first=self.options.first, - timeout=self.options.timeout, - startport=self._getport(count), - extraconfigopts=self.options.extra_config_opt, - py3kwarnings=self.options.py3k_warnings, - shell=self.options.shell, - hgcommand=self._hgcommand, - usechg=bool(self.options.with_chg or self.options.chg), - useipv6=useipv6, **kwds) + t = testcls( + refpath, + self._outputdir, + tmpdir, + keeptmpdir=self.options.keep_tmpdir, + debug=self.options.debug, + first=self.options.first, + timeout=self.options.timeout, + startport=self._getport(count), + extraconfigopts=self.options.extra_config_opt, + py3warnings=self.options.py3_warnings, + shell=self.options.shell, + hgcommand=self._hgcommand, + usechg=bool(self.options.with_chg or self.options.chg), + useipv6=useipv6, + **kwds + ) t.should_reload = True return t @@ -2745,7 +3381,7 @@ if self.options.keep_tmpdir: return - vlog("# Cleaning up HGTMP", self._hgtmp) + vlog("# Cleaning up HGTMP", _strpath(self._hgtmp)) shutil.rmtree(self._hgtmp, True) for f in self._createdfiles: try: @@ -2757,8 +3393,13 @@ """Configure the environment to use the appropriate Python in tests.""" # Tests must use the same interpreter as us or bad things will happen. pyexename = sys.platform == 'win32' and b'python.exe' or b'python' - if getattr(os, 'symlink', None): - vlog("# Making python executable in test path a symlink to '%s'" % - sys.executable) + + # os.symlink() is a thing with py3 on Windows, but it requires + # Administrator rights. 
+ if getattr(os, 'symlink', None) and os.name != 'nt': + vlog( + "# Making python executable in test path a symlink to '%s'" + % sysexecutable + ) mypython = os.path.join(self._tmpbindir, pyexename) try: @@ -2763,8 +3404,8 @@ mypython = os.path.join(self._tmpbindir, pyexename) try: - if os.readlink(mypython) == sys.executable: + if os.readlink(mypython) == sysexecutable: return os.unlink(mypython) except OSError as err: if err.errno != errno.ENOENT: raise @@ -2766,7 +3407,7 @@ return os.unlink(mypython) except OSError as err: if err.errno != errno.ENOENT: raise - if self._findprogram(pyexename) != sys.executable: + if self._findprogram(pyexename) != sysexecutable: try: @@ -2772,8 +3413,8 @@ try: - os.symlink(sys.executable, mypython) + os.symlink(sysexecutable, mypython) self._createdfiles.append(mypython) except OSError as err: # child processes may race, which is harmless if err.errno != errno.EEXIST: raise else: @@ -2774,12 +3415,14 @@ self._createdfiles.append(mypython) except OSError as err: # child processes may race, which is harmless if err.errno != errno.EEXIST: raise else: - exedir, exename = os.path.split(sys.executable) - vlog("# Modifying search path to find %s as %s in '%s'" % - (exename, pyexename, exedir)) + exedir, exename = os.path.split(sysexecutable) + vlog( + "# Modifying search path to find %s as %s in '%s'" + % (exename, pyexename, exedir) + ) path = os.environ['PATH'].split(os.pathsep) while exedir in path: path.remove(exedir) @@ -2804,7 +3447,7 @@ # Run installer in hg root script = os.path.realpath(sys.argv[0]) - exe = sys.executable + exe = sysexecutable if PYTHON3: compiler = _bytespath(compiler) script = _bytespath(script) @@ -2819,17 +3462,24 @@ # least on Windows for now, deal with .pydistutils.cfg bugs # when they happen. nohome = b'' - cmd = (b'%(exe)s setup.py %(pure)s clean --all' - b' build %(compiler)s --build-base="%(base)s"' - b' install --force --prefix="%(prefix)s"' - b' --install-lib="%(libdir)s"' - b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1' - % {b'exe': exe, b'pure': pure, - b'compiler': compiler, - b'base': os.path.join(self._hgtmp, b"build"), - b'prefix': self._installdir, b'libdir': self._pythondir, - b'bindir': self._bindir, - b'nohome': nohome, b'logfile': installerrs}) + cmd = ( + b'"%(exe)s" setup.py %(pure)s clean --all' + b' build %(compiler)s --build-base="%(base)s"' + b' install --force --prefix="%(prefix)s"' + b' --install-lib="%(libdir)s"' + b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1' + % { + b'exe': exe, + b'pure': pure, + b'compiler': compiler, + b'base': os.path.join(self._hgtmp, b"build"), + b'prefix': self._installdir, + b'libdir': self._pythondir, + b'bindir': self._bindir, + b'nohome': nohome, + b'logfile': installerrs, + } + ) # setuptools requires install directories to exist. 
def makedirs(p): @@ -2838,6 +3488,7 @@ except OSError as e: if e.errno != errno.EEXIST: raise + makedirs(self._pythondir) makedirs(self._bindir) @@ -2841,8 +3492,8 @@ makedirs(self._pythondir) makedirs(self._bindir) - vlog("# Running", cmd) - if os.system(cmd) == 0: + vlog("# Running", cmd.decode("utf-8")) + if subprocess.call(_strpath(cmd), shell=True) == 0: if not self.options.verbose: try: os.remove(installerrs) @@ -2861,7 +3512,7 @@ self._usecorrectpython() - if self.options.py3k_warnings and not self.options.anycoverage: + if self.options.py3_warnings and not self.options.anycoverage: vlog("# Updating hg command to enable Py3k Warnings switch") with open(os.path.join(self._bindir, 'hg'), 'rb') as f: lines = [line.rstrip() for line in f] @@ -2876,12 +3527,14 @@ # installation layout put it in bin/ directly. Fix it with open(hgbat, 'rb') as f: data = f.read() - if b'"%~dp0..\python" "%~dp0hg" %*' in data: - data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*', - b'"%~dp0python" "%~dp0hg" %*') + if br'"%~dp0..\python" "%~dp0hg" %*' in data: + data = data.replace( + br'"%~dp0..\python" "%~dp0hg" %*', + b'"%~dp0python" "%~dp0hg" %*', + ) with open(hgbat, 'wb') as f: f.write(data) else: print('WARNING: cannot fix hg.bat reference to python.exe') if self.options.anycoverage: @@ -2882,10 +3535,12 @@ with open(hgbat, 'wb') as f: f.write(data) else: print('WARNING: cannot fix hg.bat reference to python.exe') if self.options.anycoverage: - custom = os.path.join(self._testdir, 'sitecustomize.py') - target = os.path.join(self._pythondir, 'sitecustomize.py') + custom = os.path.join( + osenvironb[b'RUNTESTDIR'], b'sitecustomize.py' + ) + target = os.path.join(self._pythondir, b'sitecustomize.py') vlog('# Installing coverage trigger to %s' % target) shutil.copyfile(custom, target) @@ -2890,4 +3545,4 @@ vlog('# Installing coverage trigger to %s' % target) shutil.copyfile(custom, target) - rc = os.path.join(self._testdir, '.coveragerc') + rc = os.path.join(self._testdir, b'.coveragerc') vlog('# Installing coverage rc to %s' % rc) @@ -2893,9 +3548,9 @@ vlog('# Installing coverage rc to %s' % rc) - os.environ['COVERAGE_PROCESS_START'] = rc - covdir = os.path.join(self._installdir, '..', 'coverage') + osenvironb[b'COVERAGE_PROCESS_START'] = rc + covdir = os.path.join(self._installdir, b'..', b'coverage') try: os.mkdir(covdir) except OSError as e: if e.errno != errno.EEXIST: raise @@ -2896,11 +3551,11 @@ try: os.mkdir(covdir) except OSError as e: if e.errno != errno.EEXIST: raise - os.environ['COVERAGE_DIR'] = covdir + osenvironb[b'COVERAGE_DIR'] = covdir def _checkhglib(self, verb): """Ensure that the 'mercurial' package imported by python is the one we expect it to be. If not, print a warning to stderr.""" @@ -2903,12 +3558,13 @@ def _checkhglib(self, verb): """Ensure that the 'mercurial' package imported by python is the one we expect it to be. If not, print a warning to stderr.""" - if ((self._bindir == self._pythondir) and - (self._bindir != self._tmpbindir)): + if (self._bindir == self._pythondir) and ( + self._bindir != self._tmpbindir + ): # The pythondir has been inferred from --with-hg flag. # We cannot expect anything sensible here. return expecthg = os.path.join(self._pythondir, b'mercurial') actualhg = self._gethgpath() if os.path.abspath(actualhg) != os.path.abspath(expecthg): @@ -2909,15 +3565,17 @@ # The pythondir has been inferred from --with-hg flag. # We cannot expect anything sensible here. 
return expecthg = os.path.join(self._pythondir, b'mercurial') actualhg = self._gethgpath() if os.path.abspath(actualhg) != os.path.abspath(expecthg): - sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n' - ' (expected %s)\n' - % (verb, actualhg, expecthg)) + sys.stderr.write( + 'warning: %s with unexpected mercurial lib: %s\n' + ' (expected %s)\n' % (verb, actualhg, expecthg) + ) + def _gethgpath(self): """Return the path to the mercurial package that is actually found by the current Python interpreter.""" if self._hgpath is not None: return self._hgpath @@ -2918,10 +3576,10 @@ def _gethgpath(self): """Return the path to the mercurial package that is actually found by the current Python interpreter.""" if self._hgpath is not None: return self._hgpath - cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"' + cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"' cmd = cmd % PYTHON if PYTHON3: cmd = _strpath(cmd) @@ -2925,11 +3583,11 @@ cmd = cmd % PYTHON if PYTHON3: cmd = _strpath(cmd) - pipe = os.popen(cmd) - try: - self._hgpath = _bytespath(pipe.read().strip()) - finally: - pipe.close() + + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) + out, err = p.communicate() + + self._hgpath = out.strip() return self._hgpath @@ -2938,8 +3596,9 @@ vlog('# Performing temporary installation of CHG') assert os.path.dirname(self._bindir) == self._installdir assert self._hgroot, 'must be called after _installhg()' - cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"' - % {b'make': 'make', # TODO: switch by option or environment? - b'prefix': self._installdir}) + cmd = b'"%(make)s" clean install PREFIX="%(prefix)s"' % { + b'make': b'make', # TODO: switch by option or environment? + b'prefix': self._installdir, + } cwd = os.path.join(self._hgroot, b'contrib', b'chg') vlog("# Running", cmd) @@ -2944,8 +3603,13 @@ cwd = os.path.join(self._hgroot, b'contrib', b'chg') vlog("# Running", cmd) - proc = subprocess.Popen(cmd, shell=True, cwd=cwd, - stdin=subprocess.PIPE, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + proc = subprocess.Popen( + cmd, + shell=True, + cwd=cwd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) out, _err = proc.communicate() if proc.returncode != 0: if PYTHON3: @@ -2957,9 +3621,10 @@ def _outputcoverage(self): """Produce code coverage output.""" import coverage + coverage = coverage.coverage vlog('# Producing coverage report') # chdir is the easiest way to get short, relative paths in the # output. os.chdir(self._hgroot) @@ -2960,10 +3625,10 @@ coverage = coverage.coverage vlog('# Producing coverage report') # chdir is the easiest way to get short, relative paths in the # output. os.chdir(self._hgroot) - covdir = os.path.join(self._installdir, '..', 'coverage') + covdir = os.path.join(_strpath(self._installdir), '..', 'coverage') cov = coverage(data_file=os.path.join(covdir, 'cov')) # Map install directory paths back to source directory. @@ -2967,7 +3632,7 @@ cov = coverage(data_file=os.path.join(covdir, 'cov')) # Map install directory paths back to source directory. 
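A side note on the lines this comment introduces: cov.config.paths['srcdir'] is coverage.py's path aliasing, so data collected against the temporary install is folded back onto the checkout when cov.combine() runs. The equivalent declarative form in a .coveragerc would look roughly as follows; the install path shown is only an illustration of what self._pythondir typically points at, not a value taken from the patch:

[paths]
srcdir =
    .
    /tmp/hgtests.abc123/install/lib/python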
- cov.config.paths['srcdir'] = ['.', self._pythondir] + cov.config.paths['srcdir'] = ['.', _strpath(self._pythondir)] cov.combine() @@ -2971,7 +3636,10 @@ cov.combine() - omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]] + omit = [ + _strpath(os.path.join(x, b'*')) + for x in [self._bindir, self._testdir] + ] cov.report(ignore_errors=True, omit=omit) if self.options.htmlcov: @@ -2975,6 +3643,6 @@ cov.report(ignore_errors=True, omit=omit) if self.options.htmlcov: - htmldir = os.path.join(self._outputdir, 'htmlcov') + htmldir = os.path.join(_strpath(self._outputdir), 'htmlcov') cov.html_report(directory=htmldir, omit=omit) if self.options.annotate: @@ -2979,6 +3647,6 @@ cov.html_report(directory=htmldir, omit=omit) if self.options.annotate: - adir = os.path.join(self._outputdir, 'annotated') + adir = os.path.join(_strpath(self._outputdir), 'annotated') if not os.path.isdir(adir): os.mkdir(adir) cov.annotate(directory=adir, omit=omit) @@ -2996,6 +3664,6 @@ def _checktools(self): """Ensure tools required to run tests are present.""" for p in self.REQUIREDTOOLS: - if os.name == 'nt' and not p.endswith('.exe'): - p += '.exe' + if os.name == 'nt' and not p.endswith(b'.exe'): + p += b'.exe' found = self._findprogram(p) @@ -3001,2 +3669,3 @@ found = self._findprogram(p) + p = p.decode("utf-8") if found: @@ -3002,3 +3671,3 @@ if found: - vlog("# Found prerequisite", p, "at", found) + vlog("# Found prerequisite", p, "at", _strpath(found)) else: @@ -3004,5 +3673,5 @@ else: - print("WARNING: Did not find prerequisite tool: %s " % - p.decode("utf-8")) + print("WARNING: Did not find prerequisite tool: %s " % p) + def aggregateexceptions(path): @@ -3007,7 +3676,9 @@ def aggregateexceptions(path): - exceptions = collections.Counter() + exceptioncounts = collections.Counter() + testsbyfailure = collections.defaultdict(set) + failuresbytest = collections.defaultdict(set) for f in os.listdir(path): with open(os.path.join(path, f), 'rb') as fh: data = fh.read().split(b'\0') @@ -3010,7 +3681,7 @@ for f in os.listdir(path): with open(os.path.join(path, f), 'rb') as fh: data = fh.read().split(b'\0') - if len(data) != 4: + if len(data) != 5: continue @@ -3015,7 +3686,7 @@ continue - exc, mainframe, hgframe, hgline = data + exc, mainframe, hgframe, hgline, testname = data exc = exc.decode('utf-8') mainframe = mainframe.decode('utf-8') hgframe = hgframe.decode('utf-8') hgline = hgline.decode('utf-8') @@ -3018,13 +3689,49 @@ exc = exc.decode('utf-8') mainframe = mainframe.decode('utf-8') hgframe = hgframe.decode('utf-8') hgline = hgline.decode('utf-8') - exceptions[(hgframe, hgline, exc)] += 1 - - return exceptions + testname = testname.decode('utf-8') + + key = (hgframe, hgline, exc) + exceptioncounts[key] += 1 + testsbyfailure[key].add(testname) + failuresbytest[testname].add(key) + + # Find test having fewest failures for each failure. + leastfailing = {} + for key, tests in testsbyfailure.items(): + fewesttest = None + fewestcount = 99999999 + for test in sorted(tests): + if len(failuresbytest[test]) < fewestcount: + fewesttest = test + fewestcount = len(failuresbytest[test]) + + leastfailing[key] = (fewestcount, fewesttest) + + # Create a combined counter so we can sort by total occurrences and + # impacted tests. 
+    combined = {}
+    for key in exceptioncounts:
+        combined[key] = (
+            exceptioncounts[key],
+            len(testsbyfailure[key]),
+            leastfailing[key][0],
+            leastfailing[key][1],
+        )
+
+    return {
+        'exceptioncounts': exceptioncounts,
+        'total': sum(exceptioncounts.values()),
+        'combined': combined,
+        'leastfailing': leastfailing,
+        'byfailure': testsbyfailure,
+        'bytest': failuresbytest,
+    }
+

 if __name__ == '__main__':
     runner = TestRunner()
     try:
         import msvcrt
@@ -3025,9 +3732,10 @@
 if __name__ == '__main__':
     runner = TestRunner()
     try:
         import msvcrt
+
         msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
         msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
         msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
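Stepping back to aggregateexceptions(): it now returns several views over the raw exception records instead of a flat Counter. The sketch below rebuilds the 'combined' view from a few invented sample records and prints it the way TextTestRunner.run() does, which may help when reading the new exceptions report:

import collections

# Each record is ((hgframe, hgline, exc), testname); the values are made up.
records = [
    (('revlog.py:1628', 'raise error.RevlogError(msg)', 'RevlogError'), 'test-bundle.t'),
    (('revlog.py:1628', 'raise error.RevlogError(msg)', 'RevlogError'), 'test-clone.t'),
    (('util.py:2210', 'raise OSError(err)', 'OSError'), 'test-clone.t'),
]

exceptioncounts = collections.Counter()
testsbyfailure = collections.defaultdict(set)
failuresbytest = collections.defaultdict(set)
for key, testname in records:
    exceptioncounts[key] += 1
    testsbyfailure[key].add(testname)
    failuresbytest[testname].add(key)

# For each failure, remember the test showing the fewest distinct failures;
# that is the cheapest test to reproduce the problem with.
leastfailing = {}
for key, tests in testsbyfailure.items():
    leastfailing[key] = min(
        (len(failuresbytest[t]), t) for t in sorted(tests)
    )

combined = {}
for key in exceptioncounts:
    combined[key] = (
        exceptioncounts[key],
        len(testsbyfailure[key]),
        leastfailing[key][0],
        leastfailing[key][1],
    )

# Same ordering and format as the 'Exceptions Report' in TextTestRunner.run().
for key in sorted(combined, key=combined.get, reverse=True):
    frame, line, exc = key
    totalcount, testcount, leastcount, leasttest = combined[key]
    print(
        '%d (%d tests)\t%s: %s (%s - %d total)'
        % (totalcount, testcount, frame, exc, leasttest, leastcount)
    )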