streamclone: include obsstore file into stream bundle if client can read it
authorAnton Shestakov <av6@dwimlabs.net>
Fri, 05 Oct 2018 23:27:17 +0800
changeset 53132 0ac794e0e285cd02bcb8576d0ae30f092abf91e0
parent 53131 36b134c436b89abe7d4ff4550cfb5afc0de1065c
child 53133 4ab6e7b4fe8ac60873b4eeb3d26a5efd0e38b204
push id: 1065
push user: gszorc@mozilla.com
push date: Wed, 31 Oct 2018 02:06:22 +0000
streamclone: include obsstore file into stream bundle if client can read it
mercurial/bundle2.py
mercurial/streamclone.py
tests/test-clone-uncompressed.t
--- a/mercurial/bundle2.py
+++ b/mercurial/bundle2.py
@@ -1692,18 +1692,25 @@ def addpartbundlestream2(bundler, repo, 
     excludepats = kwargs.get(r'excludepats')
 
     narrowstream = repo.ui.configbool('experimental.server',
                                       'stream-narrow-clones')
 
     if (includepats or excludepats) and not narrowstream:
         raise error.Abort(_('server does not support narrow stream clones'))
 
+    includeobsmarkers = False
+    if repo.obsstore:
+        remoteversions = obsmarkersversion(bundler.capabilities)
+        if repo.obsstore._version in remoteversions:
+            includeobsmarkers = True
+
     filecount, bytecount, it = streamclone.generatev2(repo, includepats,
-                                                      excludepats)
+                                                      excludepats,
+                                                      includeobsmarkers)
     requirements = _formatrequirementsspec(repo.requirements)
     part = bundler.newpart('stream2', data=it)
     part.addparam('bytecount', '%d' % bytecount, mandatory=True)
     part.addparam('filecount', '%d' % filecount, mandatory=True)
     part.addparam('requirements', requirements, mandatory=True)
 
 def buildobsmarkerspart(bundler, markers):
     """add an obsmarker part to the bundler with <markers>
--- a/mercurial/streamclone.py
+++ b/mercurial/streamclone.py
@@ -527,17 +527,17 @@ def _emit2(repo, entries, totalfilesize)
                     chunks = util.filechunkiter(fp, limit=size)
                 for chunk in chunks:
                     seen += len(chunk)
                     progress.update(seen)
                     yield chunk
             finally:
                 fp.close()
 
-def generatev2(repo, includes, excludes):
+def generatev2(repo, includes, excludes, includeobsmarkers):
     """Emit content for version 2 of a streaming clone.
 
     the data stream consists the following entries:
     1) A char representing the file destination (eg: store or cache)
     2) A varint containing the length of the filename
     3) A varint containing the length of file data
     4) N bytes containing the filename (the internal, store-agnostic form)
     5) N bytes containing the file data
@@ -562,16 +562,19 @@ def generatev2(repo, includes, excludes)
         for name, ename, size in _walkstreamfiles(repo, matcher):
             if size:
                 entries.append((_srcstore, name, _fileappend, size))
                 totalfilesize += size
         for name in _walkstreamfullstorefiles(repo):
             if repo.svfs.exists(name):
                 totalfilesize += repo.svfs.lstat(name).st_size
                 entries.append((_srcstore, name, _filefull, None))
+        if includeobsmarkers and repo.svfs.exists('obsstore'):
+            totalfilesize += repo.svfs.lstat('obsstore').st_size
+            entries.append((_srcstore, 'obsstore', _filefull, None))
         for name in cacheutil.cachetocopy(repo):
             if repo.cachevfs.exists(name):
                 totalfilesize += repo.cachevfs.lstat(name).st_size
                 entries.append((_srccache, name, _filefull, None))
 
         chunks = _emit2(repo, entries, totalfilesize)
         first = next(chunks)
         assert first is None
--- a/tests/test-clone-uncompressed.t
+++ b/tests/test-clone-uncompressed.t
@@ -509,8 +509,53 @@ stream v1 unsuitable for non-publishing 
   updating to branch default
   1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg -R phase-no-publish phase -r 'all()'
   0: draft
   1: draft
 #endif
 
   $ killdaemons.py
+
+#if stream-legacy
+
+With v1 of the stream protocol, changesets are always cloned as public. There is
+no exchange of obsolescence markers in stream v1.
+
+#endif
+#if stream-bundle2
+
+Stream repository with obsolescence
+-----------------------------------
+
+Clone non-publishing with obsolescence
+
+  $ cat >> $HGRCPATH << EOF
+  > [experimental]
+  > evolution=all
+  > EOF
+
+  $ cd server
+  $ echo foo > foo
+  $ hg -q commit -m 'about to be pruned'
+  $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
+  obsoleted 1 changesets
+  $ hg up null -q
+  $ hg log -T '{rev}: {phase}\n'
+  1: draft
+  0: draft
+  $ hg serve -p $HGPORT -d --pid-file=hg.pid
+  $ cat hg.pid > $DAEMON_PIDS
+  $ cd ..
+
+  $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
+  streaming all changes
+  1035 files to transfer, 97.1 KB of data
+  transferred 97.1 KB in * seconds (* */sec) (glob)
+  $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
+  1: draft
+  0: draft
+  $ hg debugobsolete -R with-obsolescence
+  50382b884f66690b7045cac93a540cba4d4c906f 0 {c17445101a72edac06facd130d14808dfbd5c7c2} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+
+  $ killdaemons.py
+
+#endif