# wrapper.py - methods wrapping core mercurial logic
#
# Copyright 2017 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import hashlib
from mercurial.i18n import _
from mercurial.node import bin, nullid, short
from mercurial import (
    error,
    filelog,
    revlog,
    util,
)
from ..largefiles import lfutil
from . import (
    blobstore,
    pointer,
)
def supportedoutgoingversions(orig, repo):
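    # when the repo requires lfs, only offer changegroup version '03', which
    # can carry the revlog flags used to mark lfs pointer revisions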
    versions = orig(repo)
    if 'lfs' in repo.requirements:
        versions.discard('01')
        versions.discard('02')
        versions.add('03')
    return versions

def allsupportedversions(orig, ui):
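    # always accept changegroup version '03' so lfs revisions can be received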
    versions = orig(ui)
    versions.add('03')
    return versions

def _capabilities(orig, repo, proto):
    '''Wrap server command to announce lfs server capability'''
    caps = orig(repo, proto)
    # XXX: change to 'lfs=serve' when separate git server isn't required?
    caps.append('lfs')
    return caps

def bypasscheckhash(self, text):
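    # the raw revlog text of an lfs revision is a pointer, not the file
    # content, so it can never be used to validate the node hash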
    return False

def readfromstore(self, text):
"""Read filelog content from local blobstore transform for flagprocessor.
Default tranform for flagprocessor, returning contents from blobstore.
Returns a 2-typle (text, validatehash) where validatehash is True as the
contents of the blobstore should be checked using checkhash.
"""
p = pointer.deserialize(text)
oid = p.oid()
store = self.opener.lfslocalblobstore
if not store.has(oid):
p.filename = self.filename
self.opener.lfsremoteblobstore.readbatch([p], store)
    # The caller will validate the content
    text = store.read(oid, verify=False)

    # pack hg filelog metadata
    hgmeta = {}
    for k in p.keys():
        if k.startswith('x-hg-'):
            name = k[len('x-hg-'):]
            hgmeta[name] = p[k]
    if hgmeta or text.startswith('\1\n'):
        text = filelog.packmeta(hgmeta, text)

    return (text, True)

def writetostore(self, text):
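    # write transform for the flagprocessor: store the file content in the
    # local blobstore and return a serialized lfs pointer as the raw revlog
    # text; validatehash is False, as the pointer does not hash to the node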
    # hg filelog metadata (includes rename, etc)
    hgmeta, offset = filelog.parsemeta(text)
    if offset and offset > 0:
        # lfs blob does not contain hg filelog metadata
        text = text[offset:]

    # git-lfs only supports sha256
    oid = hashlib.sha256(text).hexdigest()

    self.opener.lfslocalblobstore.write(oid, text)
    # replace contents with metadata
    longoid = 'sha256:%s' % oid
    metadata = pointer.gitlfspointer(oid=longoid, size=str(len(text)))

    # by default, we expect the content to be binary. however, LFS could also
    # be used for non-binary content. add a special entry for non-binary data.
    # this will be used by filectx.isbinary().
    if not util.binary(text):
        # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
        metadata['x-is-binary'] = '0'

    # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
    if hgmeta is not None:
        for k, v in hgmeta.iteritems():
            metadata['x-hg-%s' % k] = v

    rawtext = metadata.serialize()
    return (rawtext, False)

def _islfs(rlog, node=None, rev=None):
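    # a revision is stored via lfs iff its revlog entry carries the
    # REVIDX_EXTSTORED flag; without a node or rev (e.g. working copy
    # content), report False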
    if rev is None:
        if node is None:
            # both None - likely working copy content where node is not ready
            return False
        rev = rlog.rev(node)
    else:
        node = rlog.node(rev)
    if node == nullid:
        return False
    flags = rlog.flags(rev)
    return bool(flags & revlog.REVIDX_EXTSTORED)

def filelogaddrevision(orig, self, text, transaction, link, p1, p2,
                       cachedelta=None, node=None,
                       flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds):
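    # flag revisions whose content (minus any rename metadata) exceeds the
    # configured lfs threshold as EXTSTORED, so they are routed through the
    # lfs flag processor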
    threshold = self.opener.options['lfsthreshold']
    textlen = len(text)
    # exclude hg rename meta from file size
    meta, offset = filelog.parsemeta(text)
    if offset:
        textlen -= offset

    if threshold and textlen > threshold:
        flags |= revlog.REVIDX_EXTSTORED

    return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
                node=node, flags=flags, **kwds)

def filelogrenamed(orig, self, node):
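    # for lfs revisions, answer the rename question from the pointer's
    # 'x-hg-copy'/'x-hg-copyrev' metadata without reading the blob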
    if _islfs(self, node):
        rawtext = self.revision(node, raw=True)
        if not rawtext:
            return False
        metadata = pointer.deserialize(rawtext)
        if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata:
            return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev'])
        else:
            return False
    return orig(self, node)

def filelogsize(orig, self, rev):
    if _islfs(self, rev=rev):
        # fast path: use lfs metadata to answer size
        rawtext = self.revision(rev, raw=True)
        metadata = pointer.deserialize(rawtext)
        return int(metadata['size'])
    return orig(self, rev)

def filectxcmp(orig, self, fctx):
"""returns True if text is different than fctx"""
# some fctx (ex. hg-git) is not based on basefilectx and do not have islfs
if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
# fast path: check LFS oid
p1 = pointer.deserialize(self.rawdata())
p2 = pointer.deserialize(fctx.rawdata())
return p1.oid() != p2.oid()
return orig(self, fctx)
def filectxisbinary(orig, self):
    if self.islfs():
        # fast path: use lfs metadata to answer isbinary
        metadata = pointer.deserialize(self.rawdata())
        # if lfs metadata says nothing, assume it's binary by default
        return bool(int(metadata.get('x-is-binary', 1)))
    return orig(self)

def filectxislfs(self):
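    # True if this file revision is stored as an lfs pointer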
    return _islfs(self.filelog(), self.filenode())

def convertsink(orig, sink):
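    # wrap the convert extension's hg sink: if a converted commit introduces
    # lfs revisions, add the 'lfs' requirement and enable the extension in
    # the destination repo's hgrc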
    sink = orig(sink)
    if sink.repotype == 'hg':
        class lfssink(sink.__class__):
            def putcommit(self, files, copies, parents, commit, source, revmap,
                          full, cleanp2):
                pc = super(lfssink, self).putcommit
                node = pc(files, copies, parents, commit, source, revmap, full,
                          cleanp2)

                if 'lfs' not in self.repo.requirements:
                    ctx = self.repo[node]

                    # The file list may contain removed files, so check for
                    # membership before assuming it is in the context.
                    if any(f in ctx and ctx[f].islfs() for f, n in files):
                        self.repo.requirements.add('lfs')
                        self.repo._writerequirements()

                        # Permanently enable lfs locally
                        with self.repo.vfs('hgrc', 'a', text=True) as fp:
                            fp.write('\n[extensions]\nlfs=\n')

                return node

        sink.__class__ = lfssink

    return sink

def vfsinit(orig, self, othervfs):
    orig(self, othervfs)
    # copy lfs related options
    for k, v in othervfs.options.items():
        if k.startswith('lfs'):
            self.options[k] = v
    # also copy lfs blobstores. note: this can run before reposetup, so lfs
    # blobstore attributes are not always ready at this time.
    for name in ['lfslocalblobstore', 'lfsremoteblobstore']:
        if util.safehasattr(othervfs, name):
            setattr(self, name, getattr(othervfs, name))

def hgclone(orig, ui, opts, *args, **kwargs):
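    # after 'hg clone', permanently enable the lfs extension in the new
    # repo's hgrc when the cloned repo carries the 'lfs' requirement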
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is
        # available from the peer.  Therefore the hgrc can't be updated.
        if not repo:
            return result

        # If lfs is required for this repo, permanently enable it locally
        if 'lfs' in repo.requirements:
            with repo.vfs('hgrc', 'a', text=True) as fp:
                fp.write('\n[extensions]\nlfs=\n')

    return result

def hgpostshare(orig, sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    orig(sourcerepo, destrepo, bookmarks, defaultpath)
    # If lfs is required for this repo, permanently enable it locally
    if 'lfs' in destrepo.requirements:
        with destrepo.vfs('hgrc', 'a', text=True) as fp:
            fp.write('\n[extensions]\nlfs=\n')

def _canskipupload(repo):
    # if remotestore is a null store, upload is a no-op and can be skipped
    return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)

def candownload(repo):
    # if remotestore is a null store, downloads will lead to nothing
    return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)

def uploadblobsfromrevs(repo, revs):
    '''upload lfs blobs introduced by revs

    Note: also used by other extensions, e.g. infinitepush. avoid renaming.
    '''
    if _canskipupload(repo):
        return
    pointers = extractpointers(repo, revs)
    uploadblobs(repo, pointers)

def prepush(pushop):
"""Prepush hook.
Read through the revisions to push, looking for filelog entries that can be
deserialized into metadata so that we can block the push on their upload to
the remote blobstore.
"""
return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)
def push(orig, repo, remote, *args, **kwargs):
"""bail on push if the extension isn't enabled on remote when needed"""
if 'lfs' in repo.requirements:
# If the remote peer is for a local repo, the requirement tests in the
# base class method enforce lfs support. Otherwise, some revisions in
# this repo use lfs, and the remote repo needs the extension loaded.
if not remote.local() and not remote.capable('lfs'):
# This is a copy of the message in exchange.push() when requirements
# are missing between local repos.
m = _("required features are not supported in the destination: %s")
raise error.Abort(m % 'lfs',
hint=_('enable the lfs extension on the server'))
return orig(repo, remote, *args, **kwargs)
def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
                   *args, **kwargs):
    """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
    uploadblobsfromrevs(repo, outgoing.missing)
    return orig(ui, repo, source, filename, bundletype, outgoing, *args,
                **kwargs)

def extractpointers(repo, revs):
"""return a list of lfs pointers added by given revs"""
repo.ui.debug('lfs: computing set of blobs to upload\n')
pointers = {}
for r in revs:
ctx = repo[r]
for p in pointersfromctx(ctx).values():
pointers[p.oid()] = p
return sorted(pointers.values())
def pointersfromctx(ctx):
"""return a dict {path: pointer} for given single changectx"""
result = {}
for f in ctx.files():
if f not in ctx:
continue
fctx = ctx[f]
if not _islfs(fctx.filelog(), fctx.filenode()):
continue
try:
result[f] = pointer.deserialize(fctx.rawdata())
except pointer.InvalidPointer as ex:
raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n')
% (f, short(ctx.node()), ex))
return result
def uploadblobs(repo, pointers):
"""upload given pointers from local blobstore"""
if not pointers:
return
remoteblob = repo.svfs.lfsremoteblobstore
remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)
def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
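    # when upgrading the repository format, link or copy each lfs blob from
    # the old store into the upgraded repo's local blobstore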
    orig(ui, srcrepo, dstrepo, requirements)

    srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
    dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs

    for dirpath, dirs, files in srclfsvfs.walk():
        for oid in files:
            ui.write(_('copying lfs blob %s\n') % oid)
            lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))

def upgraderequirements(orig, repo):
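    # carry the 'lfs' requirement over to the upgraded repository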
    reqs = orig(repo)
    if 'lfs' in repo.requirements:
        reqs.add('lfs')
    return reqs