
Commit 2f2605fc authored by Raphaël Gomès

python3: use format-source to run byteify-strings in .py files

Using the format-source extension smooths out the pain of merging after
auto-formatting.

This change makes the entire Evolve test suite pass under python3 and has
the added benefit of being 100% automated, using mercurial's `byteify-strings`
script version 1.0 (revision 11498aa91c036c6d70f7ac5ee5af2664a84a1130).

How to set up and benefit from format-source is explained in the README.
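
To make the effect concrete, here is a small before/after sketch of the kind of rewrite `byteify-strings` performs, mirroring the pattern visible throughout the diff below (illustrative only; the exact output depends on the flags documented in the README):

    # Before: bare string literals, which were bytes under Python 2
    TROUBLES = {
        r'ORPHAN': 'orphan',
    }
    ui.write('markers total: %9i\n' % nbmarkers)

    # After: explicit b'' prefixes keep the values bytes under Python 3,
    # while r''-prefixed keys are left alone and stay native strings
    TROUBLES = {
        r'ORPHAN': b'orphan',
    }
    ui.write(b'markers total: %9i\n' % nbmarkers)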
parent 67567d7f1174
{"pattern": "glob:hgext3rd/**/*.py", "tool": "byteify-strings"}
exclude contrib
exclude .hg-format-source
recursive-exclude contrib *
exclude hgext3rd/evolve/hack
recursive-exclude hgext3rd/evolve/hack *
@@ -145,3 +145,23 @@ expected test change only, no code change should happen there.
 test output change from a changeset in core should add the following line to their description:

     CORE-TEST-OUTPUT-UPDATE: <CORE-NODE-ID>

+Format-source config
+====================
+
+Format-source helps smooth out the pain of merging after auto-formatting.
+
+Follow the installation instructions at `format-source`_.
+
+.. _`format-source`: https://bitbucket.org/octobus/format-source
+
+Then update both your global and repo config files::
+
+  $ hg config -l # add the lines below
+  [extensions]
+  formatsource =
+
+  [format-source]
+  byteify-strings = python3 ~/workspace/octobus/mercurial-devel/contrib/byteify-strings.py --dictiter --treat-as-kwargs kwargs opts commitopts TROUBLES --allow-attr-methods
+  byteify-strings:mode.input = file
+  byteify-strings:mode.output = pipe
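
The two mode settings describe how format-source drives the tool: with `mode.input = file` the file path is passed as a command-line argument, and with `mode.output = pipe` the rewritten source is read back from the tool's stdout. Assuming the same mercurial-devel checkout as in the config above, the effect on a single file can therefore be previewed by hand (a sketch, not part of the documented workflow):

  $ python3 ~/workspace/octobus/mercurial-devel/contrib/byteify-strings.py \
      --dictiter --treat-as-kwargs kwargs opts commitopts TROUBLES \
      --allow-attr-methods hgext3rd/evolve/compat.py \
      | diff hgext3rd/evolve/compat.py -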
@@ -62,20 +62,20 @@ def isobsnotesupported():

 # Evolution renaming compat

 TROUBLES = {
-    r'ORPHAN': 'orphan',
-    r'CONTENTDIVERGENT': 'content-divergent',
-    r'PHASEDIVERGENT': 'phase-divergent',
+    r'ORPHAN': b'orphan',
+    r'CONTENTDIVERGENT': b'content-divergent',
+    r'PHASEDIVERGENT': b'phase-divergent',
 }

 if util.safehasattr(uimod.ui, 'makeprogress'):
-    def progress(ui, topic, pos, item="", unit="", total=None):
+    def progress(ui, topic, pos, item=b"", unit=b"", total=None):
         progress = ui.makeprogress(topic, unit, total)
         if pos is not None:
             progress.update(pos, item=item)
         else:
             progress.complete()
 else:
-    def progress(ui, topic, pos, item="", unit="", total=None):
+    def progress(ui, topic, pos, item=b"", unit=b"", total=None):
         ui.progress(topic, pos, item, unit, total)

 # XXX: Better detection of property cache
@@ -92,26 +92,26 @@ def memfilectx(repo, ctx, fctx, flags, copied, path):
     if r"copysource" in varnames:
         mctx = context.memfilectx(repo, ctx, fctx.path(), fctx.data(),
-                                  islink='l' in flags,
-                                  isexec='x' in flags,
+                                  islink=b'l' in flags,
+                                  isexec=b'x' in flags,
                                   copysource=copied.get(path))
     # compat with hg <- 4.9
     elif varnames[2] == r"changectx":
         mctx = context.memfilectx(repo, ctx, fctx.path(), fctx.data(),
-                                  islink='l' in flags,
-                                  isexec='x' in flags,
+                                  islink=b'l' in flags,
+                                  isexec=b'x' in flags,
                                   copied=copied.get(path))
     else:
         mctx = context.memfilectx(repo, fctx.path(), fctx.data(),
-                                  islink='l' in flags,
-                                  isexec='x' in flags,
+                                  islink=b'l' in flags,
+                                  isexec=b'x' in flags,
                                   copied=copied.get(path))
     return mctx

 def strdiff(a, b, fn1, fn2):
     """ A version of mdiff.unidiff for comparing two strings
     """
-    args = [a, '', b, '', fn1, fn2]
+    args = [a, b'', b, b'', fn1, fn2]
     # hg < 4.6 compat 8b6dd3922f70
     if util.safehasattr(inspect, 'signature'):
@@ -218,7 +218,7 @@ def fixedcopytracing(repo, c1, c2, base):
     if limit is None:
         # no common ancestor, no copies
         return {}, {}, {}, {}, {}
-    repo.ui.debug(" searching for copies back to rev %d\n" % limit)
+    repo.ui.debug(b" searching for copies back to rev %d\n" % limit)

     m1 = c1.manifest()
     m2 = c2.manifest()
@@ -232,18 +232,18 @@ def fixedcopytracing(repo, c1, c2, base):
     # - incompletediverge = record divergent partial copies here
     diverge = {} # divergence data is shared
     incompletediverge = {}
-    data1 = {'copy': {},
-             'fullcopy': {},
-             'incomplete': {},
-             'diverge': diverge,
-             'incompletediverge': incompletediverge,
-             }
-    data2 = {'copy': {},
-             'fullcopy': {},
-             'incomplete': {},
-             'diverge': diverge,
-             'incompletediverge': incompletediverge,
-             }
+    data1 = {b'copy': {},
+             b'fullcopy': {},
+             b'incomplete': {},
+             b'diverge': diverge,
+             b'incompletediverge': incompletediverge,
+             }
+    data2 = {b'copy': {},
+             b'fullcopy': {},
+             b'incomplete': {},
+             b'diverge': diverge,
+             b'incompletediverge': incompletediverge,
+             }

     # find interesting file sets from manifests
     if hg48:
@@ -260,20 +260,20 @@ def fixedcopytracing(repo, c1, c2, base):
     else:
         # unmatched file from base (DAG rotation in the graft case)
         u1r, u2r = copies._computenonoverlap(repo, c1, c2, addedinm1, addedinm2,
-                                             baselabel='base')
+                                             baselabel=b'base')

     # unmatched file from topological common ancestors (no DAG rotation)
     # need to recompute this for directory move handling when grafting
     mta = tca.manifest()
     if hg48:
         m1f = m1.filesnotin(mta, repo.narrowmatch())
         m2f = m2.filesnotin(mta, repo.narrowmatch())
-        baselabel = 'topological common ancestor'
+        baselabel = b'topological common ancestor'
         u1u, u2u = copies._computenonoverlap(repo, c1, c2, m1f, m2f,
                                              baselabel=baselabel)
     else:
         u1u, u2u = copies._computenonoverlap(repo, c1, c2, m1.filesnotin(mta),
                                              m2.filesnotin(mta),
-                                             baselabel='topological common ancestor')
+                                             baselabel=b'topological common ancestor')

     for f in u1u:
         copies._checkcopies(c1, c2, f, base, tca, dirtyc1, limit, data1)
@@ -281,16 +281,16 @@ def fixedcopytracing(repo, c1, c2, base):
     for f in u2u:
         copies._checkcopies(c2, c1, f, base, tca, dirtyc2, limit, data2)

-    copy = dict(data1['copy'])
-    copy.update(data2['copy'])
-    fullcopy = dict(data1['fullcopy'])
-    fullcopy.update(data2['fullcopy'])
+    copy = dict(data1[b'copy'])
+    copy.update(data2[b'copy'])
+    fullcopy = dict(data1[b'fullcopy'])
+    fullcopy.update(data2[b'fullcopy'])

     if dirtyc1:
-        copies._combinecopies(data2['incomplete'], data1['incomplete'], copy, diverge,
+        copies._combinecopies(data2[b'incomplete'], data1[b'incomplete'], copy, diverge,
                               incompletediverge)
     else:
-        copies._combinecopies(data1['incomplete'], data2['incomplete'], copy, diverge,
+        copies._combinecopies(data1[b'incomplete'], data2[b'incomplete'], copy, diverge,
                               incompletediverge)

     renamedelete = {}
@@ -308,23 +308,23 @@ def fixedcopytracing(repo, c1, c2, base):
         divergeset.update(fl) # reverse map for below

     if bothnew:
-        repo.ui.debug(" unmatched files new in both:\n %s\n"
-                      % "\n ".join(bothnew))
+        repo.ui.debug(b" unmatched files new in both:\n %s\n"
+                      % b"\n ".join(bothnew))
     bothdiverge = {}
     bothincompletediverge = {}
     remainder = {}
-    both1 = {'copy': {},
-             'fullcopy': {},
-             'incomplete': {},
-             'diverge': bothdiverge,
-             'incompletediverge': bothincompletediverge
-             }
-    both2 = {'copy': {},
-             'fullcopy': {},
-             'incomplete': {},
-             'diverge': bothdiverge,
-             'incompletediverge': bothincompletediverge
-             }
+    both1 = {b'copy': {},
+             b'fullcopy': {},
+             b'incomplete': {},
+             b'diverge': bothdiverge,
+             b'incompletediverge': bothincompletediverge
+             }
+    both2 = {b'copy': {},
+             b'fullcopy': {},
+             b'incomplete': {},
+             b'diverge': bothdiverge,
+             b'incompletediverge': bothincompletediverge
+             }
     for f in bothnew:
         copies._checkcopies(c1, c2, f, base, tca, dirtyc1, limit, both1)
         copies._checkcopies(c2, c1, f, base, tca, dirtyc2, limit, both2)
@@ -333,17 +333,17 @@ def fixedcopytracing(repo, c1, c2, base):
         pass
     elif dirtyc1:
         # incomplete copies may only be found on the "dirty" side for bothnew
-        assert not both2['incomplete']
-        remainder = copies._combinecopies({}, both1['incomplete'], copy, bothdiverge,
+        assert not both2[b'incomplete']
+        remainder = copies._combinecopies({}, both1[b'incomplete'], copy, bothdiverge,
                                           bothincompletediverge)
     elif dirtyc2:
-        assert not both1['incomplete']
-        remainder = copies._combinecopies({}, both2['incomplete'], copy, bothdiverge,
+        assert not both1[b'incomplete']
+        remainder = copies._combinecopies({}, both2[b'incomplete'], copy, bothdiverge,
                                           bothincompletediverge)
     else:
         # incomplete copies and divergences can't happen outside grafts
-        assert not both1['incomplete']
-        assert not both2['incomplete']
+        assert not both1[b'incomplete']
+        assert not both2[b'incomplete']
         assert not bothincompletediverge
     for f in remainder:
         assert f not in bothdiverge
@@ -356,30 +356,30 @@ def fixedcopytracing(repo, c1, c2, base):
             copy[fl[0]] = of # not actually divergent, just matching renames

     if fullcopy and repo.ui.debugflag:
-        repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
-                      "% = renamed and deleted):\n")
+        repo.ui.debug(b" all copies found (* = to merge, ! = divergent, "
+                      b"% = renamed and deleted):\n")
         for f in sorted(fullcopy):
-            note = ""
+            note = b""
             if f in copy:
-                note += "*"
+                note += b"*"
             if f in divergeset:
-                note += "!"
+                note += b"!"
             if f in renamedeleteset:
-                note += "%"
-            repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
-                                                            note))
+                note += b"%"
+            repo.ui.debug(b" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
+                                                             note))
     del divergeset

     if not fullcopy:
         return copy, {}, diverge, renamedelete, {}

-    repo.ui.debug(" checking for directory renames\n")
+    repo.ui.debug(b" checking for directory renames\n")

     # generate a directory move map
     d1, d2 = c1.dirs(), c2.dirs()
     # Hack for adding '', which is not otherwise added, to d1 and d2
-    d1.addpath('/')
-    d2.addpath('/')
+    d1.addpath(b'/')
+    d2.addpath(b'/')
     invalid = set()
     dirmove = {}
@@ -392,16 +392,16 @@ def fixedcopytracing(repo, c1, c2, base):
             continue
         elif dsrc in d1 and ddst in d1:
             # directory wasn't entirely moved locally
-            invalid.add(dsrc + "/")
+            invalid.add(dsrc + b"/")
         elif dsrc in d2 and ddst in d2:
             # directory wasn't entirely moved remotely
-            invalid.add(dsrc + "/")
-        elif dsrc + "/" in dirmove and dirmove[dsrc + "/"] != ddst + "/":
+            invalid.add(dsrc + b"/")
+        elif dsrc + b"/" in dirmove and dirmove[dsrc + b"/"] != ddst + b"/":
             # files from the same directory moved to two different places
-            invalid.add(dsrc + "/")
+            invalid.add(dsrc + b"/")
         else:
             # looks good so far
-            dirmove[dsrc + "/"] = ddst + "/"
+            dirmove[dsrc + b"/"] = ddst + b"/"

     for i in invalid:
         if i in dirmove:
@@ -412,7 +412,7 @@ def fixedcopytracing(repo, c1, c2, base):
         return copy, {}, diverge, renamedelete, {}

     for d in dirmove:
-        repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" %
+        repo.ui.debug(b" discovered dir src: '%s' -> dst: '%s'\n" %
                       (d, dirmove[d]))

     movewithdir = {}
@@ -425,8 +425,8 @@ def fixedcopytracing(repo, c1, c2, base):
                 df = dirmove[d] + f[len(d):]
                 if df not in copy:
                     movewithdir[f] = df
-                    repo.ui.debug((" pending file src: '%s' -> "
-                                   "dst: '%s'\n") % (f, df))
+                    repo.ui.debug((b" pending file src: '%s' -> "
+                                    b"dst: '%s'\n") % (f, df))
                 break

     return copy, movewithdir, diverge, renamedelete, dirmove
@@ -494,8 +494,8 @@ def markersusers(markers):
         return obsutil.markersusers(markers)
     markersmeta = [dict(m[3]) for m in markers]
-    users = set(encoding.tolocal(meta['user']) for meta in markersmeta
-                if meta.get('user'))
+    users = set(encoding.tolocal(meta[b'user']) for meta in markersmeta
+                if meta.get(b'user'))
     return sorted(users)
@@ -511,7 +511,7 @@ def markersoperations(markers):
         return obsutil.markersoperations(markers)
     markersmeta = [dict(m[3]) for m in markers]
-    operations = set(meta.get('operation') for meta in markersmeta
-                     if meta.get('operation'))
+    operations = set(meta.get(b'operation') for meta in markersmeta
+                     if meta.get(b'operation'))
     return sorted(operations)
@@ -140,7 +140,7 @@ class revlogbaseddag(basedag):
     def _internalize(self, id):
         ix = self._revlog.rev(id)
         if ix == nullrev:
-            raise LookupError(id, self._revlog.indexfile, _('nullid'))
+            raise LookupError(id, self._revlog.indexfile, _(b'nullid'))
         return ix

     def _internalizeall(self, ids, filterunknown):
@@ -45,7 +45,7 @@ def cmddebugobsstorestat(ui, repo):
     unfi = repo.unfiltered()
     nm = unfi.changelog.nodemap
     nbmarkers = len(store._all)
-    ui.write(_('markers total: %9i\n') % nbmarkers)
+    ui.write(_(b'markers total: %9i\n') % nbmarkers)
     sucscount = [0, 0, 0, 0]
     known = 0
     parentsdata = 0
@@ -67,7 +67,7 @@ def cmddebugobsstorestat(ui, repo):
             metakeys.setdefault(key, 0)
             metakeys[key] += 1
         meta = dict(meta)
-        parents = [meta.get('p1'), meta.get('p2')]
+        parents = [meta.get(b'p1'), meta.get(b'p2')]
         parents = [node.bin(p) for p in parents if p is not None]
         if parents:
             parentsdata += 1
@@ -91,71 +91,71 @@ def cmddebugobsstorestat(ui, repo):
         fc = (frozenset(c[0]), frozenset(c[1]))
         for n in fc[0]:
             pclustersmap[n] = fc

-    numobs = len(unfi.revs('obsolete()'))
+    numobs = len(unfi.revs(b'obsolete()'))
     numtotal = len(unfi)
-    ui.write((' for known precursors: %9i' % known))
-    ui.write((' (%i/%i obsolete changesets)\n' % (numobs, numtotal)))
-    ui.write((' with parents data: %9i\n' % parentsdata))
+    ui.write((b' for known precursors: %9i' % known))
+    ui.write((b' (%i/%i obsolete changesets)\n' % (numobs, numtotal)))
+    ui.write((b' with parents data: %9i\n' % parentsdata))

     # successors data
-    ui.write(('markers with no successors: %9i\n' % sucscount[0]))
-    ui.write((' 1 successors: %9i\n' % sucscount[1]))
-    ui.write((' 2 successors: %9i\n' % sucscount[2]))
-    ui.write((' more than 2 successors: %9i\n' % sucscount[3]))
+    ui.write((b'markers with no successors: %9i\n' % sucscount[0]))
+    ui.write((b' 1 successors: %9i\n' % sucscount[1]))
+    ui.write((b' 2 successors: %9i\n' % sucscount[2]))
+    ui.write((b' more than 2 successors: %9i\n' % sucscount[3]))

     # meta data info
-    ui.write((' available keys:\n'))
+    ui.write((b' available keys:\n'))
     for key in sorted(metakeys):
-        ui.write((' %15s: %9i\n' % (key, metakeys[key])))
+        ui.write((b' %15s: %9i\n' % (key, metakeys[key])))

     size_v0.sort()
     size_v1.sort()
     if size_v0:
-        ui.write('marker size:\n')
+        ui.write(b'marker size:\n')
         # format v1
-        ui.write(' format v1:\n')
-        ui.write((' smallest length: %9i\n' % size_v1[0]))
-        ui.write((' longer length: %9i\n' % size_v1[-1]))
+        ui.write(b' format v1:\n')
+        ui.write((b' smallest length: %9i\n' % size_v1[0]))
+        ui.write((b' longer length: %9i\n' % size_v1[-1]))
         median = size_v1[nbmarkers // 2]
-        ui.write((' median length: %9i\n' % median))
+        ui.write((b' median length: %9i\n' % median))
         mean = sum(size_v1) // nbmarkers
-        ui.write((' mean length: %9i\n' % mean))
+        ui.write((b' mean length: %9i\n' % mean))
         # format v0
-        ui.write(' format v0:\n')
-        ui.write((' smallest length: %9i\n' % size_v0[0]))
-        ui.write((' longer length: %9i\n' % size_v0[-1]))
+        ui.write(b' format v0:\n')
+        ui.write((b' smallest length: %9i\n' % size_v0[0]))
+        ui.write((b' longer length: %9i\n' % size_v0[-1]))
         median = size_v0[nbmarkers // 2]
-        ui.write((' median length: %9i\n' % median))
+        ui.write((b' median length: %9i\n' % median))
         mean = sum(size_v0) // nbmarkers
-        ui.write((' mean length: %9i\n' % mean))
+        ui.write((b' mean length: %9i\n' % mean))

     allclusters = list(set(clustersmap.values()))
     allclusters.sort(key=lambda x: len(x[1]))
-    ui.write(('disconnected clusters: %9i\n' % len(allclusters)))
-    ui.write(' any known node: %9i\n'
+    ui.write((b'disconnected clusters: %9i\n' % len(allclusters)))
+    ui.write(b' any known node: %9i\n'
             % len([c for c in allclusters
                    if [n for n in c[0] if nm.get(n) is not None]]))
     if allclusters:
         nbcluster = len(allclusters)
-        ui.write((' smallest length: %9i\n' % len(allclusters[0][1])))
-        ui.write((' longer length: %9i\n'
-                 % len(allclusters[-1][1])))
+        ui.write((b' smallest length: %9i\n' % len(allclusters[0][1])))
+        ui.write((b' longer length: %9i\n'
+                  % len(allclusters[-1][1])))
         median = len(allclusters[nbcluster // 2][1])
-        ui.write((' median length: %9i\n' % median))
+        ui.write((b' median length: %9i\n' % median))
         mean = sum(len(x[1]) for x in allclusters) // nbcluster
-        ui.write((' mean length: %9i\n' % mean))
+        ui.write((b' mean length: %9i\n' % mean))

     allpclusters = list(set(pclustersmap.values()))
     allpclusters.sort(key=lambda x: len(x[1]))
-    ui.write((' using parents data: %9i\n' % len(allpclusters)))
-    ui.write(' any known node: %9i\n'
+    ui.write((b' using parents data: %9i\n' % len(allpclusters)))
+    ui.write(b' any known node: %9i\n'
             % len([c for c in allclusters
                    if [n for n in c[0] if nm.get(n) is not None]]))
     if allpclusters:
         nbcluster = len(allpclusters)
-        ui.write((' smallest length: %9i\n'
-                 % len(allpclusters[0][1])))
-        ui.write((' longer length: %9i\n'
-                 % len(allpclusters[-1][1])))
+        ui.write((b' smallest length: %9i\n'
+                  % len(allpclusters[0][1])))
+        ui.write((b' longer length: %9i\n'
+                  % len(allpclusters[-1][1])))
         median = len(allpclusters[nbcluster // 2][1])
-        ui.write((' median length: %9i\n' % median))
+        ui.write((b' median length: %9i\n' % median))
         mean = sum(len(x[1]) for x in allpclusters) // nbcluster
-        ui.write((' mean length: %9i\n' % mean))
+        ui.write((b' mean length: %9i\n' % mean))
@@ -32,7 +32,7 @@ eh = exthelper.exthelper()

 def simpledepth(repo, rev):
     """simple but obviously right implementation of depth"""
-    return len(repo.revs('::%d', rev))
+    return len(repo.revs(b'::%d', rev))

 @eh.command(
     b'debugdepth',
@@ -46,25 +46,25 @@ def debugdepth(ui, repo, **opts):
     """
     revs = scmutil.revrange(repo, opts['rev'])
     method = opts['method']
-    if method in ('cached', 'compare'):
+    if method in (b'cached', b'compare'):
         cache = repo.depthcache
         cache.save(repo)
     for r in revs:
         ctx = repo[r]
-        if method == 'simple':
+        if method == b'simple':
             depth = simpledepth(repo, r)
-        elif method == 'cached':
+        elif method == b'cached':
             depth = cache.get(r)
-        elif method == 'compare':
+        elif method == b'compare':
             simple = simpledepth(repo, r)
             cached = cache.get(r)
             if simple != cached:
-                raise error.Abort('depth differ for revision %s: %d != %d'
+                raise error.Abort(b'depth differ for revision %s: %d != %d'
                                   % (ctx, simple, cached))
             depth = simple
         else:
-            raise error.Abort('unknown method "%s"' % method)
-        ui.write('%s %d\n' % (ctx, depth))
+            raise error.Abort(b'unknown method "%s"' % method)
+        ui.write(b'%s %d\n' % (ctx, depth))

 @eh.reposetup
 def setupcache(ui, repo):
@@ -94,8 +94,8 @@ def setupcache(ui, repo):

 class depthcache(genericcaches.changelogsourcebase):

-    _filepath = 'evoext-depthcache-00'
-    _cachename = 'evo-ext-depthcache'
+    _filepath = b'evoext-depthcache-00'
+    _cachename = b'evo-ext-depthcache'

     def __init__(self):
         super(depthcache, self).__init__()
@@ -103,7 +103,7 @@ class depthcache(genericcaches.changelogsourcebase):

     def get(self, rev):
         if len(self._data) <= rev:
-            raise error.ProgrammingError('depthcache must be warmed before use')
+            raise error.ProgrammingError(b'depthcache must be warmed before use')
         return self._data[rev]

     def _updatefrom(self, repo, data):
@@ -113,9 +113,9 @@ class depthcache(genericcaches.changelogsourcebase):
         total = len(data)

         def progress(pos, rev=None):
-            revstr = '' if rev is None else ('rev %d' % rev)
-            compat.progress(repo.ui, 'updating depth cache',
-                            pos, revstr, unit='revision', total=total)
+            revstr = b'' if rev is None else (b'rev %d' % rev)
+            compat.progress(repo.ui, b'updating depth cache',
+                            pos, revstr, unit=b'revision', total=total)

         progress(0)
         for idx, rev in enumerate(data, 1):
             assert rev == len(self._data), (rev, len(self._data))
@@ -199,12 +199,12 @@ class depthcache(genericcaches.changelogsourcebase):
             return

         try:
-            cachefile = repo.cachevfs(self._filepath, 'w', atomictemp=True)
+            cachefile = repo.cachevfs(self._filepath, b'w', atomictemp=True)
             headerdata = self._serializecachekey()
             cachefile.write(headerdata)
             cachefile.write(compat.arraytobytes(self._data))
             cachefile.close()
             self._ondiskkey = self._cachekey
         except (IOError, OSError) as exc:
-            repo.ui.log('depthcache', 'could not write update %s\n' % exc)
-            repo.ui.debug('depthcache: could not write update %s\n' % exc)
+            repo.ui.log(b'depthcache', b'could not write update %s\n' % exc)
+            repo.ui.debug(b'depthcache: could not write update %s\n' % exc)
@@ -83,7 +83,7 @@ class exthelper(object):
         self._duckpunchers = []
         self.cmdtable = {}
         self.command = registrar.command(self.cmdtable)
-        if '^init' in commands.table:
+        if b'^init' in commands.table:
             olddoregister = self.command._doregister

             def _newdoregister(self, name, *args, **kwargs):
@@ -277,9 +277,9 @@ class exthelper(object):
         else:
             for opt in opts:
                 if not isinstance(opt, tuple):
-                    raise error.ProgrammingError('opts must be list of tuples')
+                    raise error.ProgrammingError(b'opts must be list of tuples')
                 if len(opt) not in (4, 5):
-                    msg = 'each opt tuple must contain 4 or 5 values'
+                    msg = b'each opt tuple must contain 4 or 5 values'
                     raise error.ProgrammingError(msg)
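
One detail that makes this conversion safe to rely on: the converted code uses %-formatting on bytes (for example `ui.write(b'%s %d\n' % (ctx, depth))` above). Python 3 only regained bytes interpolation in 3.5 via PEP 461, which is the minimum Python 3 version Mercurial targeted at the time, so the idiom is available everywhere this code runs. A minimal illustration:

    # bytes %-interpolation (PEP 461), Python 3.5+
    label = b'markers total'
    line = b'%s: %9i\n' % (label, 42)
    # -> b'markers total:        42\n' (the integer right-aligned in 9 columns)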