
Commit e48f179d authored by Pulkit Goyal

merge with default

--HG--
branch : stable
Pipeline #28777 passed with stages in 59 minutes and 6 seconds
@@ -37,9 +37,9 @@ botocore==1.12.243 \
     --hash=sha256:397585a7881230274afb8d1877ef69a661b0a311745cd324f14a052fb2a2863a \
     --hash=sha256:4496f8da89cb496462a831897ad248e13e431d9fa7e41e06d426fd6658ab6e59 \
     # via boto3, s3transfer
-certifi==2019.9.11 \
-    --hash=sha256:e4f3620cfea4f83eedc95b24abd9cd56f3c4b146dd0177e83a21b4eb49e21e50 \
-    --hash=sha256:fd7c7c74727ddcf00e9acd26bba8da604ffec95bf1c2144e67aff7a8b50e6cef \
+certifi==2021.5.30 \
+    --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \
+    --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \
     # via requests
 cffi==1.12.3 \
     --hash=sha256:041c81822e9f84b1d9c401182e174996f0bae9991f33725d059b771744290774 \
......
# dirstatenonnormalcheck.py - extension to check the consistency of the
# dirstate's non-normal map
#
# For most operations on dirstate, this extension checks that the nonnormalset
# contains the right entries.
# It compares the nonnormalset to a set built from the map of all the files
# in the dirstate, to check that they contain the same files.
from __future__ import absolute_import

from mercurial import (
    dirstate,
    extensions,
    pycompat,
)


def nonnormalentries(dmap):
    """Compute nonnormal entries from dirstate's dmap"""
    res = set()
    for f, e in dmap.iteritems():
        if e.state != b'n' or e.mtime == -1:
            res.add(f)
    return res


def checkconsistency(ui, orig, dmap, _nonnormalset, label):
    """Compute nonnormalset from dmap, check that it matches _nonnormalset"""
    nonnormalcomputedmap = nonnormalentries(dmap)
    if _nonnormalset != nonnormalcomputedmap:
        b_orig = pycompat.sysbytes(repr(orig))
        ui.develwarn(b"%s call to %s\n" % (label, b_orig), config=b'dirstate')
        ui.develwarn(b"inconsistency in nonnormalset\n", config=b'dirstate')
        b_nonnormal = pycompat.sysbytes(repr(_nonnormalset))
        ui.develwarn(b"[nonnormalset] %s\n" % b_nonnormal, config=b'dirstate')
        b_nonnormalcomputed = pycompat.sysbytes(repr(nonnormalcomputedmap))
        ui.develwarn(b"[map] %s\n" % b_nonnormalcomputed, config=b'dirstate')


def _checkdirstate(orig, self, *args, **kwargs):
    """Check nonnormal set consistency before and after the call to orig"""
    checkconsistency(
        self._ui, orig, self._map, self._map.nonnormalset, b"before"
    )
    r = orig(self, *args, **kwargs)
    checkconsistency(
        self._ui, orig, self._map, self._map.nonnormalset, b"after"
    )
    return r


def extsetup(ui):
    """Wrap functions modifying dirstate to check nonnormalset consistency"""
    dirstatecl = dirstate.dirstate
    devel = ui.configbool(b'devel', b'all-warnings')
    paranoid = ui.configbool(b'experimental', b'nonnormalparanoidcheck')
    if devel:
        extensions.wrapfunction(dirstatecl, '_writedirstate', _checkdirstate)
    if paranoid:
        # We don't do all these checks when paranoid is disabled, as it
        # would make the extension run very slowly on large repos
        extensions.wrapfunction(dirstatecl, 'normallookup', _checkdirstate)
        extensions.wrapfunction(dirstatecl, 'otherparent', _checkdirstate)
        extensions.wrapfunction(dirstatecl, 'normal', _checkdirstate)
        extensions.wrapfunction(dirstatecl, 'write', _checkdirstate)
        extensions.wrapfunction(dirstatecl, 'add', _checkdirstate)
        extensions.wrapfunction(dirstatecl, 'remove', _checkdirstate)
        extensions.wrapfunction(dirstatecl, 'merge', _checkdirstate)
        extensions.wrapfunction(dirstatecl, 'drop', _checkdirstate)
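
For orientation, this is how the checks above might be switched on; a minimal hgrc sketch, assuming the extension is loaded from a local Mercurial checkout (the path is illustrative, but the two config keys are exactly the ones read by extsetup()):

[extensions]
# hypothetical path to a Mercurial source checkout
dirstatenonnormalcheck = ~/src/hg/contrib/dirstatenonnormalcheck.py

[devel]
# enables the consistency check around _writedirstate
all-warnings = true

[experimental]
# opts into the slower per-operation ("paranoid") checks
nonnormalparanoidcheck = true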
@@ -4,9 +4,9 @@
 #
 # pip-compile --generate-hashes --output-file=contrib/packaging/requirements-windows-py2.txt contrib/packaging/requirements-windows.txt.in
 #
-certifi==2020.6.20 \
-    --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \
-    --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41 \
+certifi==2021.5.30 \
+    --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \
+    --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \
     # via dulwich
 configparser==4.0.2 \
     --hash=sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c \
......
@@ -16,9 +16,9 @@ cached-property==1.5.2 \
     --hash=sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130 \
     --hash=sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0 \
     # via pygit2
-certifi==2020.6.20 \
-    --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \
-    --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41 \
+certifi==2021.5.30 \
+    --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \
+    --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \
     # via dulwich
 cffi==1.14.4 \
     --hash=sha256:00a1ba5e2e95684448de9b89888ccd02c98d512064b4cb987d48f4b40aa0421e \
......
@@ -57,10 +57,10 @@ from mercurial import (
     diffutil,
     error,
     hg,
+    logcmdutil,
     patch,
     pycompat,
     registrar,
-    scmutil,
 )
 from mercurial.utils import dateutil
@@ -180,7 +180,7 @@ def analyze(ui, repo, *revs, **opts):
     # If a mercurial repo is available, also model the commit history.
     if repo:
-        revs = scmutil.revrange(repo, revs)
+        revs = logcmdutil.revrange(repo, revs)
         revs.sort()
         progress = ui.makeprogress(
......
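The scmutil.revrange call above becomes logcmdutil.revrange, a substitution that repeats throughout this commit (together with revsingle and revpair below): the revset-resolution helpers appear to move from scmutil to logcmdutil, with signatures apparently unchanged, judging by the one-for-one replacements. A minimal sketch of the new call sites, assuming a repo object and the same argument shapes the hunks in this commit pass:

from mercurial import logcmdutil

# resolve a list of revset strings to a set of revisions
revs = logcmdutil.revrange(repo, [b"::tip"])
# resolve one revision spec, falling back to a default spec
ctx = logcmdutil.revsingle(repo, rev, rev)
# resolve a pair of revisions, e.g. for a diff
ctx1, ctx2 = logcmdutil.revpair(repo, revs)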
@@ -35,6 +35,7 @@ from mercurial.node import short
 from mercurial import (
     error,
+    logcmdutil,
     registrar,
     scmutil,
 )
@@ -84,7 +85,7 @@ def _docensor(ui, repo, path, rev=b'', tombstone=b'', **opts):
     if not len(flog):
         raise error.Abort(_(b'cannot censor file with no history'))
-    rev = scmutil.revsingle(repo, rev, rev).rev()
+    rev = logcmdutil.revsingle(repo, rev, rev).rev()
     try:
         ctx = repo[rev]
     except KeyError:
......
@@ -22,7 +22,6 @@ from mercurial import (
     logcmdutil,
     pycompat,
     registrar,
-    scmutil,
 )

 templateopts = cmdutil.templateopts
@@ -71,7 +70,7 @@ def children(ui, repo, file_=None, **opts):
     """
     opts = pycompat.byteskwargs(opts)
     rev = opts.get(b'rev')
-    ctx = scmutil.revsingle(repo, rev)
+    ctx = logcmdutil.revsingle(repo, rev)
     if file_:
         fctx = repo.filectx(file_, changeid=ctx.rev())
         childctxs = [fcctx.changectx() for fcctx in fctx.children()]
......
@@ -13,9 +13,9 @@ from mercurial import (
     cmdutil,
     context,
     error,
+    logcmdutil,
     pycompat,
     registrar,
-    scmutil,
 )

 cmdtable = {}
@@ -68,7 +68,7 @@ def close_branch(ui, repo, *revs, **opts):
     opts = pycompat.byteskwargs(opts)
     revs += tuple(opts.get(b'rev', []))
-    revs = scmutil.revrange(repo, revs)
+    revs = logcmdutil.revrange(repo, revs)
     if not revs:
         raise error.Abort(_(b'no revisions specified'))
......
@@ -36,10 +36,10 @@ from mercurial import (
     exchange,
     hg,
     lock as lockmod,
+    logcmdutil,
     merge as mergemod,
     phases,
     pycompat,
-    scmutil,
     util,
 )
 from mercurial.utils import dateutil
@@ -145,7 +145,7 @@ class mercurial_sink(common.converter_sink):
                         _(b'pulling from %s into %s\n') % (pbranch, branch)
                     )
                     exchange.pull(
-                        self.repo, prepo, [prepo.lookup(h) for h in heads]
+                        self.repo, prepo, heads=[prepo.lookup(h) for h in heads]
                     )
                 self.before()
@@ -564,7 +564,7 @@ class mercurial_source(common.converter_source):
                 )
             nodes = set()
             parents = set()
-            for r in scmutil.revrange(self.repo, [hgrevs]):
+            for r in logcmdutil.revrange(self.repo, [hgrevs]):
                 ctx = self.repo[r]
                 nodes.add(ctx.node())
                 parents.update(p.node() for p in ctx.parents())
......
@@ -423,7 +423,7 @@ def reposetup(ui, repo):
             try:
                 wlock = self.wlock()
                 for f in self.dirstate:
-                    if self.dirstate[f] != b'n':
+                    if not self.dirstate.get_entry(f).maybe_clean:
                         continue
                     if oldeol is not None:
                         if not oldeol.match(f) and not neweol.match(f):
......
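This hunk shows the other recurring substitution in the merge: comparisons against single-letter dirstate states (b'n', b'?') give way to dirstate.get_entry(f), which returns an entry object with named predicates. A minimal sketch of the pattern, limited to the two predicates this commit actually uses (maybe_clean here, any_tracked in the largefiles hunk further down); the surrounding loop is illustrative:

for f in repo.dirstate:
    entry = repo.dirstate.get_entry(f)
    if not entry.maybe_clean:  # old spelling: repo.dirstate[f] != b'n'
        continue               # skip files that cannot be clean
    if entry.any_tracked:      # old spelling: repo.dirstate[f] != b'?'
        pass                   # the file is known to the dirstate in some state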
@@ -101,6 +101,7 @@ from mercurial import (
     error,
     filemerge,
     formatter,
+    logcmdutil,
     pycompat,
     registrar,
     scmutil,
@@ -558,17 +559,17 @@ def dodiff(ui, repo, cmdline, pats, opts, guitool=False):
     do3way = b'$parent2' in cmdline

     if change:
-        ctx2 = scmutil.revsingle(repo, change, None)
+        ctx2 = logcmdutil.revsingle(repo, change, None)
         ctx1a, ctx1b = ctx2.p1(), ctx2.p2()
     elif from_rev or to_rev:
         repo = scmutil.unhidehashlikerevs(
             repo, [from_rev] + [to_rev], b'nowarn'
         )
-        ctx1a = scmutil.revsingle(repo, from_rev, None)
+        ctx1a = logcmdutil.revsingle(repo, from_rev, None)
         ctx1b = repo[nullrev]
-        ctx2 = scmutil.revsingle(repo, to_rev, None)
+        ctx2 = logcmdutil.revsingle(repo, to_rev, None)
     else:
-        ctx1a, ctx2 = scmutil.revpair(repo, revs)
+        ctx1a, ctx2 = logcmdutil.revpair(repo, revs)
         if not revs:
             ctx1b = repo[None].p2()
         else:
......
@@ -15,6 +15,7 @@ from mercurial import (
     encoding,
     error,
     extensions,
+    logcmdutil,
     patch,
     pycompat,
     registrar,
@@ -75,7 +76,7 @@ def _matchpaths(repo, rev, pats, opts, aopts=facontext.defaultopts):
     def bad(x, y):
         raise error.Abort(b"%s: %s" % (x, y))

-    ctx = scmutil.revsingle(repo, rev)
+    ctx = logcmdutil.revsingle(repo, rev)
     m = scmutil.match(ctx, pats, opts, badfn=bad)
     for p in ctx.walk(m):
         yield p
@@ -317,7 +318,7 @@ def debugbuildannotatecache(ui, repo, *pats, **opts):
         )
     if ui.configbool(b'fastannotate', b'unfilteredrepo'):
         repo = repo.unfiltered()
-    ctx = scmutil.revsingle(repo, rev)
+    ctx = logcmdutil.revsingle(repo, rev)
     m = scmutil.match(ctx, pats, opts)
     paths = list(ctx.walk(m))
     if util.safehasattr(repo, 'prefetchfastannotate'):
......
@@ -140,12 +140,10 @@ def peersetup(ui, peer):
         def getannotate(self, path, lastnode=None):
             if not self.capable(b'getannotate'):
                 ui.warn(_(b'remote peer cannot provide annotate cache\n'))
-                yield None, None
+                return None, None
             else:
                 args = {b'path': path, b'lastnode': lastnode or b''}
-                f = wireprotov1peer.future()
-                yield args, f
-                yield _parseresponse(f.value)
+                return args, _parseresponse

     peer.__class__ = fastannotatepeer
......
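The same reshaping appears again in the infinitepush hunk below: @wireprotov1peer.batchable methods stop being two-step generators (yield the encoded arguments plus a future, then yield the parsed value) and instead return an (args, decoder) pair, leaving it to the framework to call the decoder on the raw response. A minimal sketch of the new contract as these hunks suggest it; mycommand and parse are hypothetical names:

@wireprotov1peer.batchable
def mycommand(self, arg):
    if not self.capable(b'mycommand'):
        return None, None  # no request to send, nothing to decode

    def decode(d):
        return parse(d)  # turn the raw wire bytes into the result

    return {b'arg': arg}, decode  # encoded arguments + response decoder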
@@ -15,6 +15,7 @@ from mercurial.node import hex, nullrev
 from mercurial.utils import stringutil
 from mercurial import (
     error,
+    logcmdutil,
     pycompat,
     registrar,
     scmutil,
@@ -182,7 +183,7 @@ def fastexport(ui, repo, *revs, **opts):
     if not revs:
         revs = scmutil.revrange(repo, [b":"])
     else:
-        revs = scmutil.revrange(repo, revs)
+        revs = logcmdutil.revrange(repo, revs)
     if not revs:
         raise error.Abort(_(b"no revisions matched"))
     authorfile = opts.get(b"authormap")
......
@@ -144,6 +144,7 @@ from mercurial import (
     context,
     copies,
     error,
+    logcmdutil,
     match as matchmod,
     mdiff,
     merge,
@@ -283,20 +284,29 @@ def fix(ui, repo, *pats, **opts):
         # There are no data dependencies between the workers fixing each file
         # revision, so we can use all available parallelism.
         def getfixes(items):
-            for rev, path in items:
-                ctx = repo[rev]
+            for srcrev, path, dstrevs in items:
+                ctx = repo[srcrev]
                 olddata = ctx[path].data()
                 metadata, newdata = fixfile(
-                    ui, repo, opts, fixers, ctx, path, basepaths, basectxs[rev]
+                    ui,
+                    repo,
+                    opts,
+                    fixers,
+                    ctx,
+                    path,
+                    basepaths,
+                    basectxs[srcrev],
                 )
-                # Don't waste memory/time passing unchanged content back, but
-                # produce one result per item either way.
-                yield (
-                    rev,
-                    path,
-                    metadata,
-                    newdata if newdata != olddata else None,
-                )
+                # We ungroup the work items now, because the code that consumes
+                # these results has to handle each dstrev separately, and in
+                # topological order. Because these are handled in topological
+                # order, it's important that we pass around references to
+                # "newdata" instead of copying it. Otherwise, we would be
+                # keeping more copies of file content in memory at a time than
+                # if we hadn't bothered to group/deduplicate the work items.
+                data = newdata if newdata != olddata else None
+                for dstrev in dstrevs:
+                    yield (dstrev, path, metadata, data)

         results = worker.worker(
             ui, 1.0, getfixes, tuple(), workqueue, threadsafe=False
@@ -376,23 +386,32 @@ def cleanup(repo, replacements, wdirwritten):

 def getworkqueue(ui, repo, pats, opts, revstofix, basectxs):
-    """Constructs the list of files to be fixed at specific revisions
-
-    It is up to the caller how to consume the work items, and the only
-    dependence between them is that replacement revisions must be committed in
-    topological order. Each work item represents a file in the working copy or
-    in some revision that should be fixed and written back to the working copy
-    or into a replacement revision.
-
-    Work items for the same revision are grouped together, so that a worker
-    pool starting with the first N items in parallel is likely to finish the
-    first revision's work before other revisions. This can allow us to write
-    the result to disk and reduce memory footprint. At time of writing, the
-    partition strategy in worker.py seems favorable to this. We also sort the
-    items by ascending revision number to match the order in which we commit
-    the fixes later.
+    """Constructs a list of files to fix and which revisions each fix applies to
+
+    To avoid duplicating work, there is usually only one work item for each file
+    revision that might need to be fixed. There can be multiple work items per
+    file revision if the same file needs to be fixed in multiple changesets with
+    different baserevs. Each work item also contains a list of changesets where
+    the file's data should be replaced with the fixed data. The work items for
+    earlier changesets come earlier in the work queue, to improve pipelining by
+    allowing the first changeset to be replaced while fixes are still being
+    computed for later changesets.
+
+    Also returned is a map from changesets to the count of work items that might
+    affect each changeset. This is used later to count when all of a changeset's
+    work items have been finished, without having to inspect the remaining work
+    queue in each worker subprocess.
+
+    The example work item (1, "foo/bar.txt", (1, 2, 3)) means that the data of
+    bar.txt should be read from revision 1, then fixed, and written back to
+    revisions 1, 2 and 3. Revision 1 is called the "srcrev" and the list of
+    revisions is called the "dstrevs". In practice the srcrev is always one of
+    the dstrevs, and we make that choice when constructing the work item so that
+    the choice can't be made inconsistently later on. The dstrevs should all
+    have the same file revision for the given path, so the choice of srcrev is
+    arbitrary. The wdirrev can be a dstrev and a srcrev.
     """
-    workqueue = []
+    dstrevmap = collections.defaultdict(list)
     numitems = collections.defaultdict(int)
     maxfilesize = ui.configbytes(b'fix', b'maxfilesize')
     for rev in sorted(revstofix):
@@ -410,8 +429,21 @@ def getworkqueue(ui, repo, pats, opts, revstofix, basectxs):
                     % (util.bytecount(maxfilesize), path)
                 )
                 continue
-            workqueue.append((rev, path))
+            baserevs = tuple(ctx.rev() for ctx in basectxs[rev])
+            dstrevmap[(fctx.filerev(), baserevs, path)].append(rev)
             numitems[rev] += 1
+
+    workqueue = [
+        (min(dstrevs), path, dstrevs)
+        for (_filerev, _baserevs, path), dstrevs in dstrevmap.items()
+    ]
+    # Move work items for earlier changesets to the front of the queue, so we
+    # might be able to replace those changesets (in topological order) while
+    # we're still processing later work items. Note the min() in the previous
+    # expression, which means we don't need a custom comparator here. The path
+    # is also important in the sort order to make the output order stable. There
+    # are some situations where this doesn't help much, but some situations
+    # where it lets us buffer O(1) files instead of O(n) files.
+    workqueue.sort()
     return workqueue, numitems
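
To make the grouping concrete, here is a small self-contained rerun of the dedup logic above, with made-up revision numbers and file revisions (not taken from the commit):

import collections

items = [
    # (dstrev, filerev, baserevs, path)
    (1, 7, (0,), b"foo/bar.txt"),
    (2, 7, (0,), b"foo/bar.txt"),  # same filerev and baserevs: deduplicated
    (3, 8, (0,), b"foo/bar.txt"),  # different filerev: separate work item
]
dstrevmap = collections.defaultdict(list)
for rev, filerev, baserevs, path in items:
    dstrevmap[(filerev, baserevs, path)].append(rev)

workqueue = [
    (min(dstrevs), path, dstrevs)
    for (_filerev, _baserevs, path), dstrevs in dstrevmap.items()
]
workqueue.sort()
print(workqueue)  # [(1, b'foo/bar.txt', [1, 2]), (3, b'foo/bar.txt', [3])]

The file is read and fixed once (srcrev 1) and the result written to revisions 1 and 2, while revision 3 gets its own item because its file revision differs.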
@@ -420,7 +452,7 @@ def getrevstofix(ui, repo, opts):
     if opts[b'all']:
         revs = repo.revs(b'(not public() and not obsolete()) or wdir()')
     elif opts[b'source']:
-        source_revs = scmutil.revrange(repo, opts[b'source'])
+        source_revs = logcmdutil.revrange(repo, opts[b'source'])
         revs = set(repo.revs(b'(%ld::) - obsolete()', source_revs))
         if wdirrev in source_revs:
             # `wdir()::` is currently empty, so manually add wdir
@@ -428,7 +460,7 @@
             if repo[b'.'].rev() in revs:
                 revs.add(wdirrev)
     else:
-        revs = set(scmutil.revrange(repo, opts[b'rev']))
+        revs = set(logcmdutil.revrange(repo, opts[b'rev']))
         if opts.get(b'working_dir'):
             revs.add(wdirrev)
     for rev in revs:
@@ -516,9 +548,9 @@ def getbasepaths(repo, opts, workqueue, basectxs):
         return {}

     basepaths = {}
-    for rev, path in workqueue:
-        fixctx = repo[rev]
-        for basectx in basectxs[rev]:
+    for srcrev, path, _dstrevs in workqueue:
+        fixctx = repo[srcrev]
+        for basectx in basectxs[srcrev]:
             basepath = copies.pathcopies(basectx, fixctx).get(path, path)
             if basepath in basectx:
                 basepaths[(basectx.rev(), fixctx.rev(), path)] = basepath
@@ -618,7 +650,7 @@ def getbasectxs(repo, opts, revstofix):
     # The --base flag overrides the usual logic, and we give every revision
     # exactly the set of baserevs that the user specified.
     if opts.get(b'base'):
-        baserevs = set(scmutil.revrange(repo, opts.get(b'base')))
+        baserevs = set(logcmdutil.revrange(repo, opts.get(b'base')))
         if not baserevs:
             baserevs = {nullrev}
         basectxs = {repo[rev] for rev in baserevs}
@@ -641,10 +673,10 @@ def _prefetchfiles(repo, workqueue, basepaths):
     toprefetch = set()

     # Prefetch the files that will be fixed.
-    for rev, path in workqueue:
-        if rev == wdirrev:
+    for srcrev, path, _dstrevs in workqueue:
+        if srcrev == wdirrev:
             continue
-        toprefetch.add((rev, path))
+        toprefetch.add((srcrev, path))

     # Prefetch the base contents for lineranges().
     for (baserev, fixrev, path), basepath in basepaths.items():
......
@@ -333,7 +333,11 @@ def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
     # for better performance, directly access the inner dirstate map if the
     # standard dirstate implementation is in use.
     dmap = dmap._map
-    nonnormalset = self._map.nonnormalset
+    nonnormalset = {
+        f
+        for f, e in self._map.items()
+        if e.v1_state() != "n" or e.v1_mtime() == -1
+    }
     copymap = self._map.copymap
     getkind = stat.S_IFMT
@@ -560,8 +564,8 @@
             for i, (s1, s2) in enumerate(zip(l1, l2)):
                 if set(s1) != set(s2):
                     f.write(b'sets at position %d are unequal\n' % i)
-                    f.write(b'watchman returned: %s\n' % s1)
-                    f.write(b'stat returned: %s\n' % s2)
+                    f.write(b'watchman returned: %r\n' % s1)
+                    f.write(b'stat returned: %r\n' % s2)
         finally:
             f.close()
......
@@ -431,18 +431,19 @@ def localrepolistkeys(orig, self, namespace, patterns=None):

 @wireprotov1peer.batchable
 def listkeyspatterns(self, namespace, patterns):
     if not self.capable(b'pushkey'):
-        yield {}, None
-    f = wireprotov1peer.future()
+        return {}, None
     self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
-    yield {
+
+    def decode(d):
+        self.ui.debug(
+            b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
+        )
+        return pushkey.decodekeys(d)
+
+    return {
         b'namespace': encoding.fromlocal(namespace),
         b'patterns': wireprototypes.encodelist(patterns),
-    }, f
-    d = f.value
-    self.ui.debug(
-        b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
-    )
-    yield pushkey.decodekeys(d)
+    }, decode
def _readbundlerevs(bundlerepo):
......
@@ -26,6 +26,7 @@ from mercurial import (
     exthelper,
     hg,
     lock,
+    logcmdutil,
     match as matchmod,
     pycompat,
     scmutil,
@@ -540,7 +541,7 @@ def updatelfiles(
                 expecthash = lfutil.readasstandin(wctx[standin])
                 if expecthash != b'':
                     if lfile not in wctx:  # not switched to normal file
-                        if repo.dirstate[standin] != b'?':
+                        if repo.dirstate.get_entry(standin).any_tracked:
                             wvfs.unlinkpath(lfile, ignoremissing=True)
                         else:
                             dropped.add(lfile)
@@ -568,7 +569,7 @@
                     removed += 1

         # largefile processing might be slow and be interrupted - be prepared
-        lfdirstate.write()
+        lfdirstate.write(repo.currenttransaction())
         if lfiles:
             lfiles = [f for f in lfiles if f not in dropped]
@@ -577,7 +578,7 @@
                 repo.wvfs.unlinkpath(lfutil.standin(f))
                 # This needs to happen for dropped files, otherwise they stay in
                 # the M state.
-                lfdirstate._drop(f)
+                lfdirstate._map.reset_state(f)

         statuswriter(_(b'getting changed largefiles\n'))
         cachelfiles(ui, repo, None, lfiles)
@@ -618,7 +619,7 @@
                 lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

-        lfdirstate.write()
+        lfdirstate.write(repo.currenttransaction())
         if lfiles:
             statuswriter(
                 _(b'%d largefiles updated, %d removed\n') % (updated, removed)
@@ -657,7 +658,7 @@
     revs = opts.get('rev', [])
     if not revs:
         raise error.Abort(_(b'no revisions specified'))
-    revs = scmutil.revrange(repo, revs)
+    revs = logcmdutil.revrange(repo, revs)
     numcached = 0
     for rev in revs:
......
@@ -191,10 +191,12 @@ class largefilesdirstate(dirstate.dirstate):
     def _ignore(self, f):
         return False

-    def write(self, tr=False):
+    def write(self, tr):
         # (1) disable PENDING mode always
         #     (lfdirstate isn't yet managed as a part of the transaction)
         # (2) avoid develwarn 'use dirstate.write with ....'
+        if tr:
+            tr.addbackup(b'largefiles/dirstate', location=b'plain')
         super(largefilesdirstate, self).write(None)
@@ -269,7 +271,7 @@ def listlfiles(repo, rev=None, matcher=None):