From 4177eefd7bdaea96a529b00ba9cf751924ede202 Mon Sep 17 00:00:00 2001 From: Sebastian Thiel Date: Thu, 5 May 2011 19:43:22 +0200 Subject: Added all code from gitdb to gitpython. Next is to make it generally work. Then the tests will need some work --- git/test/performance/test_pack.py | 90 +++++++++++++++ git/test/performance/test_pack_streaming.py | 80 ++++++++++++++ git/test/performance/test_streams.py | 165 ++++++++++++++++++++++++++++ 3 files changed, 335 insertions(+) create mode 100644 git/test/performance/test_pack.py create mode 100644 git/test/performance/test_pack_streaming.py (limited to 'git/test/performance') diff --git a/git/test/performance/test_pack.py b/git/test/performance/test_pack.py new file mode 100644 index 00000000..da952b17 --- /dev/null +++ b/git/test/performance/test_pack.py @@ -0,0 +1,90 @@ +# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors +# +# This module is part of GitDB and is released under +# the New BSD License: http://www.opensource.org/licenses/bsd-license.php +"""Performance tests for object store""" +from lib import ( + TestBigRepoR + ) + +from gitdb.exc import UnsupportedOperation +from gitdb.db.pack import PackedDB + +import sys +import os +from time import time +import random + +class TestPackedDBPerformance(TestBigRepoR): + + def _test_pack_random_access(self): + pdb = PackedDB(os.path.join(self.gitrepopath, "objects/pack")) + + # sha lookup + st = time() + sha_list = list(pdb.sha_iter()) + elapsed = time() - st + ns = len(sha_list) + print >> sys.stderr, "PDB: looked up %i shas by index in %f s ( %f shas/s )" % (ns, elapsed, ns / elapsed) + + # sha lookup: best-case and worst case access + pdb_pack_info = pdb._pack_info + # END shuffle shas + st = time() + for sha in sha_list: + pdb_pack_info(sha) + # END for each sha to look up + elapsed = time() - st + + # discard cache + del(pdb._entities) + pdb.entities() + print >> sys.stderr, "PDB: looked up %i sha in %i packs in %f s ( %f shas/s )" % (ns, len(pdb.entities()), elapsed, ns / elapsed) + # END for each random mode + + # query info and streams only + max_items = 10000 # can wait longer when testing memory + for pdb_fun in (pdb.info, pdb.stream): + st = time() + for sha in sha_list[:max_items]: + pdb_fun(sha) + elapsed = time() - st + print >> sys.stderr, "PDB: Obtained %i object %s by sha in %f s ( %f items/s )" % (max_items, pdb_fun.__name__.upper(), elapsed, max_items / elapsed) + # END for each function + + # retrieve stream and read all + max_items = 5000 + pdb_stream = pdb.stream + total_size = 0 + st = time() + for sha in sha_list[:max_items]: + stream = pdb_stream(sha) + stream.read() + total_size += stream.size + elapsed = time() - st + total_kib = total_size / 1000 + print >> sys.stderr, "PDB: Obtained %i streams by sha and read all bytes totallying %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (max_items, total_kib, total_kib/elapsed , elapsed, max_items / elapsed) + + def test_correctness(self): + pdb = PackedDB(os.path.join(self.gitrepopath, "objects/pack")) + # disabled for now as it used to work perfectly, checking big repositories takes a long time + print >> sys.stderr, "Endurance run: verify streaming of objects (crc and sha)" + for crc in range(2): + count = 0 + st = time() + for entity in pdb.entities(): + pack_verify = entity.is_valid_stream + sha_by_index = entity.index().sha + for index in xrange(entity.index().size()): + try: + assert pack_verify(sha_by_index(index), use_crc=crc) + count += 1 + except UnsupportedOperation: + pass + # 
END ignore old indices + # END for each index + # END for each entity + elapsed = time() - st + print >> sys.stderr, "PDB: verified %i objects (crc=%i) in %f s ( %f objects/s )" % (count, crc, elapsed, count / elapsed) + # END for each verify mode + diff --git a/git/test/performance/test_pack_streaming.py b/git/test/performance/test_pack_streaming.py new file mode 100644 index 00000000..795ed1e2 --- /dev/null +++ b/git/test/performance/test_pack_streaming.py @@ -0,0 +1,80 @@ +# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors +# +# This module is part of GitDB and is released under +# the New BSD License: http://www.opensource.org/licenses/bsd-license.php +"""Specific test for pack streams only""" +from lib import ( + TestBigRepoR + ) + +from gitdb.db.pack import PackedDB +from gitdb.stream import NullStream +from gitdb.pack import PackEntity + +import os +import sys +from time import time +from nose import SkipTest + +class CountedNullStream(NullStream): + __slots__ = '_bw' + def __init__(self): + self._bw = 0 + + def bytes_written(self): + return self._bw + + def write(self, d): + self._bw += NullStream.write(self, d) + + +class TestPackStreamingPerformance(TestBigRepoR): + + def test_pack_writing(self): + # see how fast we can write a pack from object streams. + # This will not be fast, as we take time for decompressing the streams as well + ostream = CountedNullStream() + pdb = PackedDB(os.path.join(self.gitrepopath, "objects/pack")) + + ni = 5000 + count = 0 + total_size = 0 + st = time() + objs = list() + for sha in pdb.sha_iter(): + count += 1 + objs.append(pdb.stream(sha)) + if count == ni: + break + #END gather objects for pack-writing + elapsed = time() - st + print >> sys.stderr, "PDB Streaming: Got %i streams by sha in in %f s ( %f streams/s )" % (ni, elapsed, ni / elapsed) + + st = time() + PackEntity.write_pack(objs, ostream.write) + elapsed = time() - st + total_kb = ostream.bytes_written() / 1000 + print >> sys.stderr, "PDB Streaming: Wrote pack of size %i kb in %f s (%f kb/s)" % (total_kb, elapsed, total_kb/elapsed) + + + def test_stream_reading(self): + raise SkipTest() + pdb = PackedDB(os.path.join(self.gitrepopath, "objects/pack")) + + # streaming only, meant for --with-profile runs + ni = 5000 + count = 0 + pdb_stream = pdb.stream + total_size = 0 + st = time() + for sha in pdb.sha_iter(): + if count == ni: + break + stream = pdb_stream(sha) + stream.read() + total_size += stream.size + count += 1 + elapsed = time() - st + total_kib = total_size / 1000 + print >> sys.stderr, "PDB Streaming: Got %i streams by sha and read all bytes totallying %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (ni, total_kib, total_kib/elapsed , elapsed, ni / elapsed) + diff --git a/git/test/performance/test_streams.py b/git/test/performance/test_streams.py index 7f17d722..196e9003 100644 --- a/git/test/performance/test_streams.py +++ b/git/test/performance/test_streams.py @@ -1,9 +1,17 @@ """Performance data streaming performance""" +from gitdb.db.py import * +from gitdb.base import * +from gitdb.stream import * +from gitdb.util import ( + pool, + bin_to_hex + ) from git.test.lib import * from gitdb import * from gitdb.util import bin_to_hex +from cStringIO import StringIO from time import time import os import sys @@ -14,9 +22,35 @@ from gitdb.test.lib import make_memory_file from lib import ( TestBigRepoR + make_memory_file, + with_rw_directory ) +#{ Utilities +def read_chunked_stream(stream): + total = 0 + while True: + chunk = stream.read(chunk_size) + 
total += len(chunk) + if len(chunk) < chunk_size: + break + # END read stream loop + assert total == stream.size + return stream + + +class TestStreamReader(ChannelThreadTask): + """Expects input streams and reads them in chunks. It will read one at a time, + requireing a queue chunk of size 1""" + def __init__(self, *args): + super(TestStreamReader, self).__init__(*args) + self.fun = read_chunked_stream + self.max_chunksize = 1 + + +#} END utilities + class TestObjDBPerformance(TestBigRepoR): large_data_size_bytes = 1000*1000*10 # some MiB should do it @@ -129,3 +163,134 @@ class TestObjDBPerformance(TestBigRepoR): # compare print >> sys.stderr, "Git-Python is %f %% faster than git when reading big %s files in chunks" % (100.0 - (elapsed_readchunks / gelapsed_readchunks) * 100, desc) # END for each randomization factor + + @with_rw_directory + def test_large_data_streaming(self, path): + ldb = PureLooseObjectODB(path) + string_ios = list() # list of streams we previously created + + # serial mode + for randomize in range(2): + desc = (randomize and 'random ') or '' + print >> sys.stderr, "Creating %s data ..." % desc + st = time() + size, stream = make_memory_file(self.large_data_size_bytes, randomize) + elapsed = time() - st + print >> sys.stderr, "Done (in %f s)" % elapsed + string_ios.append(stream) + + # writing - due to the compression it will seem faster than it is + st = time() + sha = ldb.store(IStream('blob', size, stream)).binsha + elapsed_add = time() - st + assert ldb.has_object(sha) + db_file = ldb.readable_db_object_path(bin_to_hex(sha)) + fsize_kib = os.path.getsize(db_file) / 1000 + + + size_kib = size / 1000 + print >> sys.stderr, "Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)" % (size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add) + + # reading all at once + st = time() + ostream = ldb.stream(sha) + shadata = ostream.read() + elapsed_readall = time() - st + + stream.seek(0) + assert shadata == stream.getvalue() + print >> sys.stderr, "Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)" % (size_kib, desc, elapsed_readall, size_kib / elapsed_readall) + + + # reading in chunks of 1 MiB + cs = 512*1000 + chunks = list() + st = time() + ostream = ldb.stream(sha) + while True: + data = ostream.read(cs) + chunks.append(data) + if len(data) < cs: + break + # END read in chunks + elapsed_readchunks = time() - st + + stream.seek(0) + assert ''.join(chunks) == stream.getvalue() + + cs_kib = cs / 1000 + print >> sys.stderr, "Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)" % (size_kib, desc, cs_kib, elapsed_readchunks, size_kib / elapsed_readchunks) + + # del db file so we keep something to do + os.remove(db_file) + # END for each randomization factor + + + # multi-threaded mode + # want two, should be supported by most of todays cpus + pool.set_size(2) + total_kib = 0 + nsios = len(string_ios) + for stream in string_ios: + stream.seek(0) + total_kib += len(stream.getvalue()) / 1000 + # END rewind + + def istream_iter(): + for stream in string_ios: + stream.seek(0) + yield IStream(str_blob_type, len(stream.getvalue()), stream) + # END for each stream + # END util + + # write multiple objects at once, involving concurrent compression + reader = IteratorReader(istream_iter()) + istream_reader = ldb.store_async(reader) + istream_reader.task().max_chunksize = 1 + + st = time() + istreams = istream_reader.read(nsios) + assert len(istreams) == nsios + elapsed = time() - st + + print 
>> sys.stderr, "Threads(%i): Compressed %i KiB of data in loose odb in %f s ( %f Write KiB / s)" % (pool.size(), total_kib, elapsed, total_kib / elapsed) + + # decompress multiple at once, by reading them + # chunk size is not important as the stream will not really be decompressed + + # until its read + istream_reader = IteratorReader(iter([ i.binsha for i in istreams ])) + ostream_reader = ldb.stream_async(istream_reader) + + chunk_task = TestStreamReader(ostream_reader, "chunker", None) + output_reader = pool.add_task(chunk_task) + output_reader.task().max_chunksize = 1 + + st = time() + assert len(output_reader.read(nsios)) == nsios + elapsed = time() - st + + print >> sys.stderr, "Threads(%i): Decompressed %i KiB of data in loose odb in %f s ( %f Read KiB / s)" % (pool.size(), total_kib, elapsed, total_kib / elapsed) + + # store the files, and read them back. For the reading, we use a task + # as well which is chunked into one item per task. Reading all will + # very quickly result in two threads handling two bytestreams of + # chained compression/decompression streams + reader = IteratorReader(istream_iter()) + istream_reader = ldb.store_async(reader) + istream_reader.task().max_chunksize = 1 + + istream_to_sha = lambda items: [ i.binsha for i in items ] + istream_reader.set_post_cb(istream_to_sha) + + ostream_reader = ldb.stream_async(istream_reader) + + chunk_task = TestStreamReader(ostream_reader, "chunker", None) + output_reader = pool.add_task(chunk_task) + output_reader.max_chunksize = 1 + + st = time() + assert len(output_reader.read(nsios)) == nsios + elapsed = time() - st + + print >> sys.stderr, "Threads(%i): Compressed and decompressed and read %i KiB of data in loose odb in %f s ( %f Combined KiB / s)" % (pool.size(), total_kib, elapsed, total_kib / elapsed) -- cgit v1.2.3 From acf5e6ea64a2f24117f1d419c208ed1c38c43690 Mon Sep 17 00:00:00 2001 From: Sebastian Thiel Date: Fri, 6 May 2011 15:03:14 +0200 Subject: replaced all gitdb strings with git --- git/test/performance/test_commit.py | 2 +- git/test/performance/test_pack.py | 4 ++-- git/test/performance/test_pack_streaming.py | 6 +++--- git/test/performance/test_streams.py | 16 ++++++++-------- 4 files changed, 14 insertions(+), 14 deletions(-) (limited to 'git/test/performance') diff --git a/git/test/performance/test_commit.py b/git/test/performance/test_commit.py index 80421aa2..561e073f 100644 --- a/git/test/performance/test_commit.py +++ b/git/test/performance/test_commit.py @@ -6,7 +6,7 @@ from lib import * from git import * -from gitdb import IStream +from git import IStream from git.test.test_commit import assert_commit_serialization from cStringIO import StringIO from time import time diff --git a/git/test/performance/test_pack.py b/git/test/performance/test_pack.py index da952b17..b1f3abe1 100644 --- a/git/test/performance/test_pack.py +++ b/git/test/performance/test_pack.py @@ -7,8 +7,8 @@ from lib import ( TestBigRepoR ) -from gitdb.exc import UnsupportedOperation -from gitdb.db.pack import PackedDB +from git.exc import UnsupportedOperation +from git.db.pack import PackedDB import sys import os diff --git a/git/test/performance/test_pack_streaming.py b/git/test/performance/test_pack_streaming.py index 795ed1e2..cc890ee0 100644 --- a/git/test/performance/test_pack_streaming.py +++ b/git/test/performance/test_pack_streaming.py @@ -7,9 +7,9 @@ from lib import ( TestBigRepoR ) -from gitdb.db.pack import PackedDB -from gitdb.stream import NullStream -from gitdb.pack import PackEntity +from git.db.pack import 
PackedDB +from git.stream import NullStream +from git.pack import PackEntity import os import sys diff --git a/git/test/performance/test_streams.py b/git/test/performance/test_streams.py index 196e9003..f63d922d 100644 --- a/git/test/performance/test_streams.py +++ b/git/test/performance/test_streams.py @@ -1,15 +1,15 @@ """Performance data streaming performance""" -from gitdb.db.py import * -from gitdb.base import * -from gitdb.stream import * -from gitdb.util import ( +from git.db.py import * +from git.base import * +from git.stream import * +from git.util import ( pool, bin_to_hex ) from git.test.lib import * -from gitdb import * -from gitdb.util import bin_to_hex +from git import * +from git.util import bin_to_hex from cStringIO import StringIO from time import time @@ -18,7 +18,7 @@ import sys import stat import subprocess -from gitdb.test.lib import make_memory_file +from git.test.lib import make_memory_file from lib import ( TestBigRepoR @@ -58,7 +58,7 @@ class TestObjDBPerformance(TestBigRepoR): @with_rw_repo('HEAD', bare=True) def test_large_data_streaming(self, rwrepo): - # TODO: This part overlaps with the same file in gitdb.test.performance.test_stream + # TODO: This part overlaps with the same file in git.test.performance.test_stream # It should be shared if possible ldb = LooseObjectDB(os.path.join(rwrepo.git_dir, 'objects')) -- cgit v1.2.3 From 7ae36c3e019a5cc16924d1b6007774bfb625036f Mon Sep 17 00:00:00 2001 From: Sebastian Thiel Date: Fri, 6 May 2011 18:53:59 +0200 Subject: Started to fix imports - tests still have no chance to work as database changed drastically. Now the actual work begins --- git/test/performance/lib.py | 8 ++++---- git/test/performance/test_streams.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'git/test/performance') diff --git a/git/test/performance/lib.py b/git/test/performance/lib.py index d0727b60..08c0e91c 100644 --- a/git/test/performance/lib.py +++ b/git/test/performance/lib.py @@ -1,11 +1,11 @@ """Contains library functions""" import os -from git.test.lib import * +from git.test.lib import TestBase import shutil import tempfile from git.db import ( - GitCmdObjectDB, + CmdGitDB, GitDB ) @@ -54,7 +54,7 @@ class TestBigRepoR(TestBase): def setUpAll(cls): super(TestBigRepoR, cls).setUpAll() repo_path = resolve_or_fail(k_env_git_repo) - cls.gitrorepo = Repo(repo_path, odbt=GitCmdObjectDB) + cls.gitrorepo = Repo(repo_path, odbt=CmdGitDB) cls.puregitrorepo = Repo(repo_path, odbt=GitDB) @@ -68,7 +68,7 @@ class TestBigRepoRW(TestBigRepoR): super(TestBigRepoRW, cls).setUpAll() dirname = tempfile.mktemp() os.mkdir(dirname) - cls.gitrwrepo = cls.gitrorepo.clone(dirname, shared=True, bare=True, odbt=GitCmdObjectDB) + cls.gitrwrepo = cls.gitrorepo.clone(dirname, shared=True, bare=True, odbt=CmdGitDB) cls.puregitrwrepo = Repo(dirname, odbt=GitDB) @classmethod diff --git a/git/test/performance/test_streams.py b/git/test/performance/test_streams.py index f63d922d..25c211b0 100644 --- a/git/test/performance/test_streams.py +++ b/git/test/performance/test_streams.py @@ -7,7 +7,7 @@ from git.util import ( bin_to_hex ) -from git.test.lib import * +from git.test.lib import TestBase from git import * from git.util import bin_to_hex -- cgit v1.2.3 From 5426890ebd5a54fdc450f977137511bbd52f200e Mon Sep 17 00:00:00 2001 From: Sebastian Thiel Date: Mon, 6 Jun 2011 16:39:27 +0200 Subject: Fixed odb performance tests --- git/test/performance/__init__.py | 2 + git/test/performance/db/__init__.py | 1 + git/test/performance/db/odb_impl.py | 69 
+++++++++++++++++++++++++++++++ git/test/performance/db/test_odb_cmd.py | 6 +++ git/test/performance/db/test_odb_pure.py | 6 +++ git/test/performance/lib.py | 38 ++++++++--------- git/test/performance/test_commit.py | 4 +- git/test/performance/test_odb.py | 70 -------------------------------- 8 files changed, 102 insertions(+), 94 deletions(-) create mode 100644 git/test/performance/__init__.py create mode 100644 git/test/performance/db/__init__.py create mode 100644 git/test/performance/db/odb_impl.py create mode 100644 git/test/performance/db/test_odb_cmd.py create mode 100644 git/test/performance/db/test_odb_pure.py delete mode 100644 git/test/performance/test_odb.py (limited to 'git/test/performance') diff --git a/git/test/performance/__init__.py b/git/test/performance/__init__.py new file mode 100644 index 00000000..6bd117b9 --- /dev/null +++ b/git/test/performance/__init__.py @@ -0,0 +1,2 @@ +"""Note: This init file makes the performance tests an integral part of the test suite +as nose will now pick them up. Previously the init file was intentionally omitted""" diff --git a/git/test/performance/db/__init__.py b/git/test/performance/db/__init__.py new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/git/test/performance/db/__init__.py @@ -0,0 +1 @@ + diff --git a/git/test/performance/db/odb_impl.py b/git/test/performance/db/odb_impl.py new file mode 100644 index 00000000..50ee37e1 --- /dev/null +++ b/git/test/performance/db/odb_impl.py @@ -0,0 +1,69 @@ +"""Performance tests for object store""" + +from time import time +import sys +import stat +import copy + +from git.test.performance.lib import ( + TestBigRepoR, + GlobalsItemDeletorMetaCls + ) + +class PerfBaseDeletorMetaClass(GlobalsItemDeletorMetaCls): + ModuleToDelete = 'TestObjDBPerformanceBase' + + +class TestObjDBPerformanceBase(TestBigRepoR): + __metaclass__ = PerfBaseDeletorMetaClass + + def test_random_access_test(self): + repo = self.rorepo + + # GET COMMITS + st = time() + root_commit = repo.commit(self.head_sha_2k) + commits = list(root_commit.traverse()) + nc = len(commits) + elapsed = time() - st + + print >> sys.stderr, "%s: Retrieved %i commits from ObjectStore in %g s ( %f commits / s )" % (type(repo.odb), nc, elapsed, nc / elapsed) + + # GET TREES + # walk all trees of all commits + st = time() + blobs_per_commit = list() + nt = 0 + for commit in commits: + tree = commit.tree + blobs = list() + for item in tree.traverse(): + nt += 1 + if item.type == 'blob': + blobs.append(item) + # direct access for speed + # END while trees are there for walking + blobs_per_commit.append(blobs) + # END for each commit + elapsed = time() - st + + print >> sys.stderr, "%s: Retrieved %i objects from %i commits in %g s ( %f objects / s )" % (type(repo.odb), nt, len(commits), elapsed, nt / elapsed) + + # GET BLOBS + st = time() + nb = 0 + too_many = 15000 + data_bytes = 0 + for blob_list in blobs_per_commit: + for blob in blob_list: + data_bytes += len(blob.data_stream.read()) + # END for each blobsha + nb += len(blob_list) + if nb > too_many: + break + # END for each bloblist + elapsed = time() - st + + print >> sys.stderr, "%s: Retrieved %i blob (%i KiB) and their data in %g s ( %f blobs / s, %f KiB / s )" % (type(repo.odb), nb, data_bytes/1000, elapsed, nb / elapsed, (data_bytes / 1000) / elapsed) + + diff --git a/git/test/performance/db/test_odb_cmd.py b/git/test/performance/db/test_odb_cmd.py new file mode 100644 index 00000000..acd55cc9 --- /dev/null +++ b/git/test/performance/db/test_odb_cmd.py @@ -0,0 +1,6 @@ +from 
git.db.complex import CmdCompatibilityGitDB +from odb_impl import TestObjDBPerformanceBase + +class TestCmdDB(TestObjDBPerformanceBase): + RepoCls = CmdCompatibilityGitDB + diff --git a/git/test/performance/db/test_odb_pure.py b/git/test/performance/db/test_odb_pure.py new file mode 100644 index 00000000..6ed3585d --- /dev/null +++ b/git/test/performance/db/test_odb_pure.py @@ -0,0 +1,6 @@ +from git.db.complex import PureCompatibilityGitDB +from odb_impl import TestObjDBPerformanceBase + +class TestPureDB(TestObjDBPerformanceBase): + RepoCls = PureCompatibilityGitDB + diff --git a/git/test/performance/lib.py b/git/test/performance/lib.py index 08c0e91c..cf80a0de 100644 --- a/git/test/performance/lib.py +++ b/git/test/performance/lib.py @@ -1,18 +1,12 @@ """Contains library functions""" import os -from git.test.lib import TestBase +from git.test.lib import ( + TestBase, + GlobalsItemDeletorMetaCls + ) import shutil import tempfile -from git.db import ( - CmdGitDB, - GitDB - ) - -from git import ( - Repo - ) - #{ Invvariants k_env_git_repo = "GIT_PYTHON_TEST_GIT_REPO_BASE" #} END invariants @@ -38,11 +32,7 @@ class TestBigRepoR(TestBase): * gitrorepo - * Read-Only git repository - actually the repo of git itself - - * puregitrorepo - - * As gitrepo, but uses pure python implementation + * a big read-only git repository """ #{ Invariants @@ -50,29 +40,33 @@ class TestBigRepoR(TestBase): head_sha_50 = '32347c375250fd470973a5d76185cac718955fd5' #} END invariants + #{ Configuration + RepoCls = None + #} END configuration + @classmethod def setUpAll(cls): super(TestBigRepoR, cls).setUpAll() - repo_path = resolve_or_fail(k_env_git_repo) - cls.gitrorepo = Repo(repo_path, odbt=CmdGitDB) - cls.puregitrorepo = Repo(repo_path, odbt=GitDB) + if cls.RepoCls is None: + raise AssertionError("Require RepoCls in class %s to be set" % cls) + #END assert configuration + cls.rorepo = cls.RepoCls(resolve_or_fail(k_env_git_repo)) class TestBigRepoRW(TestBigRepoR): """As above, but provides a big repository that we can write to. 
- Provides ``self.gitrwrepo`` and ``self.puregitrwrepo``""" + Provides ``self.rwrepo``""" @classmethod def setUpAll(cls): super(TestBigRepoRW, cls).setUpAll() dirname = tempfile.mktemp() os.mkdir(dirname) - cls.gitrwrepo = cls.gitrorepo.clone(dirname, shared=True, bare=True, odbt=CmdGitDB) - cls.puregitrwrepo = Repo(dirname, odbt=GitDB) + cls.rwrepo = cls.gitrorepo.clone(dirname, shared=True, bare=True) @classmethod def tearDownAll(cls): - shutil.rmtree(cls.gitrwrepo.working_dir) + shutil.rmtree(cls.rwrepo.working_dir) #} END base classes diff --git a/git/test/performance/test_commit.py b/git/test/performance/test_commit.py index 561e073f..72755e05 100644 --- a/git/test/performance/test_commit.py +++ b/git/test/performance/test_commit.py @@ -70,9 +70,9 @@ class TestPerformance(TestBigRepoRW): print >> sys.stderr, "Iterated %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc/elapsed_time) def test_commit_serialization(self): - assert_commit_serialization(self.gitrwrepo, self.head_sha_2k, True) + assert_commit_serialization(self.rwrepo, self.head_sha_2k, True) - rwrepo = self.gitrwrepo + rwrepo = self.rwrepo make_object = rwrepo.odb.store # direct serialization - deserialization can be tested afterwards # serialization is probably limited on IO diff --git a/git/test/performance/test_odb.py b/git/test/performance/test_odb.py deleted file mode 100644 index 32b70f69..00000000 --- a/git/test/performance/test_odb.py +++ /dev/null @@ -1,70 +0,0 @@ -"""Performance tests for object store""" - -from time import time -import sys -import stat - -from lib import ( - TestBigRepoR - ) - - -class TestObjDBPerformance(TestBigRepoR): - - def test_random_access(self): - results = [ ["Iterate Commits"], ["Iterate Blobs"], ["Retrieve Blob Data"] ] - for repo in (self.gitrorepo, self.puregitrorepo): - # GET COMMITS - st = time() - root_commit = repo.commit(self.head_sha_2k) - commits = list(root_commit.traverse()) - nc = len(commits) - elapsed = time() - st - - print >> sys.stderr, "%s: Retrieved %i commits from ObjectStore in %g s ( %f commits / s )" % (type(repo.odb), nc, elapsed, nc / elapsed) - results[0].append(elapsed) - - # GET TREES - # walk all trees of all commits - st = time() - blobs_per_commit = list() - nt = 0 - for commit in commits: - tree = commit.tree - blobs = list() - for item in tree.traverse(): - nt += 1 - if item.type == 'blob': - blobs.append(item) - # direct access for speed - # END while trees are there for walking - blobs_per_commit.append(blobs) - # END for each commit - elapsed = time() - st - - print >> sys.stderr, "%s: Retrieved %i objects from %i commits in %g s ( %f objects / s )" % (type(repo.odb), nt, len(commits), elapsed, nt / elapsed) - results[1].append(elapsed) - - # GET BLOBS - st = time() - nb = 0 - too_many = 15000 - data_bytes = 0 - for blob_list in blobs_per_commit: - for blob in blob_list: - data_bytes += len(blob.data_stream.read()) - # END for each blobsha - nb += len(blob_list) - if nb > too_many: - break - # END for each bloblist - elapsed = time() - st - - print >> sys.stderr, "%s: Retrieved %i blob (%i KiB) and their data in %g s ( %f blobs / s, %f KiB / s )" % (type(repo.odb), nb, data_bytes/1000, elapsed, nb / elapsed, (data_bytes / 1000) / elapsed) - results[2].append(elapsed) - # END for each repo type - - # final results - for test_name, a, b in results: - print >> sys.stderr, "%s: %f s vs %f s, pure is %f times slower" % (test_name, a, b, b / a) - # END for each result -- cgit v1.2.3 From d1032572162f91ee9e67e74321f329238fa32b15 Mon Sep 17 
00:00:00 2001 From: Sebastian Thiel Date: Mon, 6 Jun 2011 16:55:57 +0200 Subject: Fixed packed ODB test, in preparation for separating the type to allow future implementations to use the test as well --- git/test/performance/db/odb_impl.py | 4 ++++ git/test/performance/lib.py | 4 +++- git/test/performance/test_pack.py | 19 ++++++++++++++----- 3 files changed, 21 insertions(+), 6 deletions(-) (limited to 'git/test/performance') diff --git a/git/test/performance/db/odb_impl.py b/git/test/performance/db/odb_impl.py index 50ee37e1..fd1abdee 100644 --- a/git/test/performance/db/odb_impl.py +++ b/git/test/performance/db/odb_impl.py @@ -17,6 +17,10 @@ class PerfBaseDeletorMetaClass(GlobalsItemDeletorMetaCls): class TestObjDBPerformanceBase(TestBigRepoR): __metaclass__ = PerfBaseDeletorMetaClass + #{ Configuration + RepoCls = None # to be set by subclass + #} END configuration + def test_random_access_test(self): repo = self.rorepo diff --git a/git/test/performance/lib.py b/git/test/performance/lib.py index cf80a0de..c92b0eea 100644 --- a/git/test/performance/lib.py +++ b/git/test/performance/lib.py @@ -7,6 +7,8 @@ from git.test.lib import ( import shutil import tempfile +from git import Repo + #{ Invvariants k_env_git_repo = "GIT_PYTHON_TEST_GIT_REPO_BASE" #} END invariants @@ -41,7 +43,7 @@ class TestBigRepoR(TestBase): #} END invariants #{ Configuration - RepoCls = None + RepoCls = Repo #} END configuration @classmethod diff --git a/git/test/performance/test_pack.py b/git/test/performance/test_pack.py index b1f3abe1..8c1207bc 100644 --- a/git/test/performance/test_pack.py +++ b/git/test/performance/test_pack.py @@ -8,17 +8,26 @@ from lib import ( ) from git.exc import UnsupportedOperation -from git.db.pack import PackedDB +from git.db.py.pack import PurePackedODB import sys import os from time import time import random -class TestPackedDBPerformance(TestBigRepoR): +class TestPurePackedODBPerformance(TestBigRepoR): - def _test_pack_random_access(self): - pdb = PackedDB(os.path.join(self.gitrepopath, "objects/pack")) + #{ Configuration + PackedODBCls = PurePackedODB + #} END configuration + + @classmethod + def setUpAll(cls): + super(TestPurePackedODBPerformance, cls).setUpAll() + cls.ropdb = cls.PackedODBCls(cls.rorepo.db_path("pack")) + + def test_pack_random_access(self): + pdb = self.ropdb # sha lookup st = time() @@ -66,7 +75,7 @@ class TestPackedDBPerformance(TestBigRepoR): print >> sys.stderr, "PDB: Obtained %i streams by sha and read all bytes totallying %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (max_items, total_kib, total_kib/elapsed , elapsed, max_items / elapsed) def test_correctness(self): - pdb = PackedDB(os.path.join(self.gitrepopath, "objects/pack")) + pdb = self.ropdb # disabled for now as it used to work perfectly, checking big repositories takes a long time print >> sys.stderr, "Endurance run: verify streaming of objects (crc and sha)" for crc in range(2): -- cgit v1.2.3 From 155158e1410ff036812a87975cce6cb91aa8280e Mon Sep 17 00:00:00 2001 From: Sebastian Thiel Date: Mon, 6 Jun 2011 17:15:12 +0200 Subject: Added PackedDB test with generalized type to allows other implementations to be tested as well at some point --- git/test/performance/db/odb_impl.py | 1 - git/test/performance/db/packedodb_impl.py | 107 +++++++++++++++++++++++++ git/test/performance/db/test_packedodb_pure.py | 7 ++ git/test/performance/test_pack.py | 99 ----------------------- 4 files changed, 114 insertions(+), 100 deletions(-) create mode 100644 git/test/performance/db/packedodb_impl.py create mode 
100644 git/test/performance/db/test_packedodb_pure.py delete mode 100644 git/test/performance/test_pack.py (limited to 'git/test/performance') diff --git a/git/test/performance/db/odb_impl.py b/git/test/performance/db/odb_impl.py index fd1abdee..677cf6a8 100644 --- a/git/test/performance/db/odb_impl.py +++ b/git/test/performance/db/odb_impl.py @@ -3,7 +3,6 @@ from time import time import sys import stat -import copy from git.test.performance.lib import ( TestBigRepoR, diff --git a/git/test/performance/db/packedodb_impl.py b/git/test/performance/db/packedodb_impl.py new file mode 100644 index 00000000..b95a8d13 --- /dev/null +++ b/git/test/performance/db/packedodb_impl.py @@ -0,0 +1,107 @@ +# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors +# +# This module is part of GitDB and is released under +# the New BSD License: http://www.opensource.org/licenses/bsd-license.php +"""Performance tests for object store""" +from git.test.performance.lib import ( + TestBigRepoR, + GlobalsItemDeletorMetaCls + ) + +from git.exc import UnsupportedOperation + +import sys +import os +from time import time +import random + + +class PerfBaseDeletorMetaClass(GlobalsItemDeletorMetaCls): + ModuleToDelete = 'TestPurePackedODBPerformanceBase' + +class TestPurePackedODBPerformanceBase(TestBigRepoR): + __metaclass__ = PerfBaseDeletorMetaClass + + #{ Configuration + PackedODBCls = None + #} END configuration + + @classmethod + def setUpAll(cls): + super(TestPurePackedODBPerformanceBase, cls).setUpAll() + if cls.PackedODBCls is None: + raise AssertionError("PackedODBCls must be set in subclass") + #END assert configuration + cls.ropdb = cls.PackedODBCls(cls.rorepo.db_path("pack")) + + def test_pack_random_access(self): + pdb = self.ropdb + + # sha lookup + st = time() + sha_list = list(pdb.sha_iter()) + elapsed = time() - st + ns = len(sha_list) + print >> sys.stderr, "PDB: looked up %i shas by index in %f s ( %f shas/s )" % (ns, elapsed, ns / elapsed) + + # sha lookup: best-case and worst case access + pdb_pack_info = pdb._pack_info + # END shuffle shas + st = time() + for sha in sha_list: + pdb_pack_info(sha) + # END for each sha to look up + elapsed = time() - st + + # discard cache + del(pdb._entities) + pdb.entities() + print >> sys.stderr, "PDB: looked up %i sha in %i packs in %f s ( %f shas/s )" % (ns, len(pdb.entities()), elapsed, ns / elapsed) + # END for each random mode + + # query info and streams only + max_items = 10000 # can wait longer when testing memory + for pdb_fun in (pdb.info, pdb.stream): + st = time() + for sha in sha_list[:max_items]: + pdb_fun(sha) + elapsed = time() - st + print >> sys.stderr, "PDB: Obtained %i object %s by sha in %f s ( %f items/s )" % (max_items, pdb_fun.__name__.upper(), elapsed, max_items / elapsed) + # END for each function + + # retrieve stream and read all + max_items = 5000 + pdb_stream = pdb.stream + total_size = 0 + st = time() + for sha in sha_list[:max_items]: + stream = pdb_stream(sha) + stream.read() + total_size += stream.size + elapsed = time() - st + total_kib = total_size / 1000 + print >> sys.stderr, "PDB: Obtained %i streams by sha and read all bytes totallying %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (max_items, total_kib, total_kib/elapsed , elapsed, max_items / elapsed) + + def test_correctness(self): + pdb = self.ropdb + # disabled for now as it used to work perfectly, checking big repositories takes a long time + print >> sys.stderr, "Endurance run: verify streaming of objects (crc and sha)" + for crc in 
range(2): + count = 0 + st = time() + for entity in pdb.entities(): + pack_verify = entity.is_valid_stream + sha_by_index = entity.index().sha + for index in xrange(entity.index().size()): + try: + assert pack_verify(sha_by_index(index), use_crc=crc) + count += 1 + except UnsupportedOperation: + pass + # END ignore old indices + # END for each index + # END for each entity + elapsed = time() - st + print >> sys.stderr, "PDB: verified %i objects (crc=%i) in %f s ( %f objects/s )" % (count, crc, elapsed, count / elapsed) + # END for each verify mode + diff --git a/git/test/performance/db/test_packedodb_pure.py b/git/test/performance/db/test_packedodb_pure.py new file mode 100644 index 00000000..7b9f2930 --- /dev/null +++ b/git/test/performance/db/test_packedodb_pure.py @@ -0,0 +1,7 @@ +from packedodb_impl import TestPurePackedODBPerformanceBase +from git.db.py.pack import PurePackedODB + +class TestPurePackedODB(TestPurePackedODBPerformanceBase): + #{ Configuration + PackedODBCls = PurePackedODB + #} END configuration diff --git a/git/test/performance/test_pack.py b/git/test/performance/test_pack.py deleted file mode 100644 index 8c1207bc..00000000 --- a/git/test/performance/test_pack.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors -# -# This module is part of GitDB and is released under -# the New BSD License: http://www.opensource.org/licenses/bsd-license.php -"""Performance tests for object store""" -from lib import ( - TestBigRepoR - ) - -from git.exc import UnsupportedOperation -from git.db.py.pack import PurePackedODB - -import sys -import os -from time import time -import random - -class TestPurePackedODBPerformance(TestBigRepoR): - - #{ Configuration - PackedODBCls = PurePackedODB - #} END configuration - - @classmethod - def setUpAll(cls): - super(TestPurePackedODBPerformance, cls).setUpAll() - cls.ropdb = cls.PackedODBCls(cls.rorepo.db_path("pack")) - - def test_pack_random_access(self): - pdb = self.ropdb - - # sha lookup - st = time() - sha_list = list(pdb.sha_iter()) - elapsed = time() - st - ns = len(sha_list) - print >> sys.stderr, "PDB: looked up %i shas by index in %f s ( %f shas/s )" % (ns, elapsed, ns / elapsed) - - # sha lookup: best-case and worst case access - pdb_pack_info = pdb._pack_info - # END shuffle shas - st = time() - for sha in sha_list: - pdb_pack_info(sha) - # END for each sha to look up - elapsed = time() - st - - # discard cache - del(pdb._entities) - pdb.entities() - print >> sys.stderr, "PDB: looked up %i sha in %i packs in %f s ( %f shas/s )" % (ns, len(pdb.entities()), elapsed, ns / elapsed) - # END for each random mode - - # query info and streams only - max_items = 10000 # can wait longer when testing memory - for pdb_fun in (pdb.info, pdb.stream): - st = time() - for sha in sha_list[:max_items]: - pdb_fun(sha) - elapsed = time() - st - print >> sys.stderr, "PDB: Obtained %i object %s by sha in %f s ( %f items/s )" % (max_items, pdb_fun.__name__.upper(), elapsed, max_items / elapsed) - # END for each function - - # retrieve stream and read all - max_items = 5000 - pdb_stream = pdb.stream - total_size = 0 - st = time() - for sha in sha_list[:max_items]: - stream = pdb_stream(sha) - stream.read() - total_size += stream.size - elapsed = time() - st - total_kib = total_size / 1000 - print >> sys.stderr, "PDB: Obtained %i streams by sha and read all bytes totallying %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (max_items, total_kib, total_kib/elapsed , elapsed, max_items / elapsed) - 
- def test_correctness(self): - pdb = self.ropdb - # disabled for now as it used to work perfectly, checking big repositories takes a long time - print >> sys.stderr, "Endurance run: verify streaming of objects (crc and sha)" - for crc in range(2): - count = 0 - st = time() - for entity in pdb.entities(): - pack_verify = entity.is_valid_stream - sha_by_index = entity.index().sha - for index in xrange(entity.index().size()): - try: - assert pack_verify(sha_by_index(index), use_crc=crc) - count += 1 - except UnsupportedOperation: - pass - # END ignore old indices - # END for each index - # END for each entity - elapsed = time() - st - print >> sys.stderr, "PDB: verified %i objects (crc=%i) in %f s ( %f objects/s )" % (count, crc, elapsed, count / elapsed) - # END for each verify mode - -- cgit v1.2.3 From 47f14d527f61d30ffa49a6254838ca5c1aee3972 Mon Sep 17 00:00:00 2001 From: Sebastian Thiel Date: Mon, 6 Jun 2011 18:59:46 +0200 Subject: Added loose object writing and reading performance tessts, in pure and command implementations. The previous performance test was truncated a bit as it compared directly with the git hash_object write performance. This is out, and if we wanted it we could implement it , but its actually slower for us --- git/test/performance/db/looseodb_impl.py | 132 ++++++++++++ git/test/performance/db/test_looseodb_cmd.py | 11 + git/test/performance/db/test_looseodb_pure.py | 6 + git/test/performance/test_streams.py | 296 -------------------------- 4 files changed, 149 insertions(+), 296 deletions(-) create mode 100644 git/test/performance/db/looseodb_impl.py create mode 100644 git/test/performance/db/test_looseodb_cmd.py create mode 100644 git/test/performance/db/test_looseodb_pure.py delete mode 100644 git/test/performance/test_streams.py (limited to 'git/test/performance') diff --git a/git/test/performance/db/looseodb_impl.py b/git/test/performance/db/looseodb_impl.py new file mode 100644 index 00000000..6d3c1fa6 --- /dev/null +++ b/git/test/performance/db/looseodb_impl.py @@ -0,0 +1,132 @@ +"""Performance data streaming performance""" +from git.db.py import * +from git.base import * +from git.stream import * +from async import ChannelThreadTask +from git.util import ( + pool, + bin_to_hex + ) +import os +import sys +from time import time + +from git.test.lib import ( + GlobalsItemDeletorMetaCls, + make_memory_file, + with_rw_repo + ) + +from git.test.performance.lib import TestBigRepoR + + +#{ Utilities + +def read_chunked_stream(stream): + total = 0 + while True: + chunk = stream.read(chunk_size) + total += len(chunk) + if len(chunk) < chunk_size: + break + # END read stream loop + assert total == stream.size + return stream + + +class TestStreamReader(ChannelThreadTask): + """Expects input streams and reads them in chunks. 
It will read one at a time, + requireing a queue chunk of size 1""" + def __init__(self, *args): + super(TestStreamReader, self).__init__(*args) + self.fun = read_chunked_stream + self.max_chunksize = 1 + + +#} END utilities + +class PerfBaseDeletorMetaClass(GlobalsItemDeletorMetaCls): + ModuleToDelete = 'TestLooseDBWPerformanceBase' + + +class TestLooseDBWPerformanceBase(TestBigRepoR): + __metaclass__ = PerfBaseDeletorMetaClass + + large_data_size_bytes = 1000*1000*10 # some MiB should do it + moderate_data_size_bytes = 1000*1000*1 # just 1 MiB + + #{ Configuration + LooseODBCls = None + #} END configuration + + @classmethod + def setUpAll(cls): + super(TestLooseDBWPerformanceBase, cls).setUpAll() + if cls.LooseODBCls is None: + raise AssertionError("LooseODBCls must be set in subtype") + #END assert configuration + # currently there is no additional configuration + + @with_rw_repo("HEAD") + def test_large_data_streaming(self, rwrepo): + # TODO: This part overlaps with the same file in git.test.performance.test_stream + # It should be shared if possible + objects_path = rwrepo.db_path('') + ldb = self.LooseODBCls(objects_path) + + for randomize in range(2): + desc = (randomize and 'random ') or '' + print >> sys.stderr, "Creating %s data ..." % desc + st = time() + size, stream = make_memory_file(self.large_data_size_bytes, randomize) + elapsed = time() - st + print >> sys.stderr, "Done (in %f s)" % elapsed + + # writing - due to the compression it will seem faster than it is + st = time() + binsha = ldb.store(IStream('blob', size, stream)).binsha + elapsed_add = time() - st + assert ldb.has_object(binsha) + hexsha = bin_to_hex(binsha) + db_file = os.path.join(objects_path, hexsha[:2], hexsha[2:]) + fsize_kib = os.path.getsize(db_file) / 1000 + + + size_kib = size / 1000 + print >> sys.stderr, "%s: Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)" % (self.LooseODBCls.__name__, size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add) + + # reading all at once + st = time() + ostream = ldb.stream(binsha) + shadata = ostream.read() + elapsed_readall = time() - st + + stream.seek(0) + assert shadata == stream.getvalue() + print >> sys.stderr, "%s: Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)" % (self.LooseODBCls.__name__, size_kib, desc, elapsed_readall, size_kib / elapsed_readall) + + + # reading in chunks of 1 MiB + cs = 512*1000 + chunks = list() + st = time() + ostream = ldb.stream(binsha) + while True: + data = ostream.read(cs) + chunks.append(data) + if len(data) < cs: + break + # END read in chunks + elapsed_readchunks = time() - st + + stream.seek(0) + assert ''.join(chunks) == stream.getvalue() + + cs_kib = cs / 1000 + print >> sys.stderr, "%s: Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)" % (self.LooseODBCls.__name__, size_kib, desc, cs_kib, elapsed_readchunks, size_kib / elapsed_readchunks) + + # del db file so git has something to do + os.remove(db_file) + # END for each randomization factor + + diff --git a/git/test/performance/db/test_looseodb_cmd.py b/git/test/performance/db/test_looseodb_cmd.py new file mode 100644 index 00000000..9738278c --- /dev/null +++ b/git/test/performance/db/test_looseodb_cmd.py @@ -0,0 +1,11 @@ +from git.db.complex import CmdCompatibilityGitDB +from looseodb_impl import TestLooseDBWPerformanceBase + +import sys + +class TestCmdLooseDB(TestLooseDBWPerformanceBase): + LooseODBCls = CmdCompatibilityGitDB + + def test_info(self): + 
sys.stderr.write("This test does not check the write performance of the git command as it is implemented in pure python") + diff --git a/git/test/performance/db/test_looseodb_pure.py b/git/test/performance/db/test_looseodb_pure.py new file mode 100644 index 00000000..46f39d5e --- /dev/null +++ b/git/test/performance/db/test_looseodb_pure.py @@ -0,0 +1,6 @@ +from git.db.py.loose import PureLooseObjectODB +from looseodb_impl import TestLooseDBWPerformanceBase + +class TestPureLooseDB(TestLooseDBWPerformanceBase): + LooseODBCls = PureLooseObjectODB + diff --git a/git/test/performance/test_streams.py b/git/test/performance/test_streams.py deleted file mode 100644 index 25c211b0..00000000 --- a/git/test/performance/test_streams.py +++ /dev/null @@ -1,296 +0,0 @@ -"""Performance data streaming performance""" -from git.db.py import * -from git.base import * -from git.stream import * -from git.util import ( - pool, - bin_to_hex - ) - -from git.test.lib import TestBase -from git import * -from git.util import bin_to_hex - -from cStringIO import StringIO -from time import time -import os -import sys -import stat -import subprocess - -from git.test.lib import make_memory_file - -from lib import ( - TestBigRepoR - make_memory_file, - with_rw_directory - ) - - -#{ Utilities -def read_chunked_stream(stream): - total = 0 - while True: - chunk = stream.read(chunk_size) - total += len(chunk) - if len(chunk) < chunk_size: - break - # END read stream loop - assert total == stream.size - return stream - - -class TestStreamReader(ChannelThreadTask): - """Expects input streams and reads them in chunks. It will read one at a time, - requireing a queue chunk of size 1""" - def __init__(self, *args): - super(TestStreamReader, self).__init__(*args) - self.fun = read_chunked_stream - self.max_chunksize = 1 - - -#} END utilities - -class TestObjDBPerformance(TestBigRepoR): - - large_data_size_bytes = 1000*1000*10 # some MiB should do it - moderate_data_size_bytes = 1000*1000*1 # just 1 MiB - - @with_rw_repo('HEAD', bare=True) - def test_large_data_streaming(self, rwrepo): - # TODO: This part overlaps with the same file in git.test.performance.test_stream - # It should be shared if possible - ldb = LooseObjectDB(os.path.join(rwrepo.git_dir, 'objects')) - - for randomize in range(2): - desc = (randomize and 'random ') or '' - print >> sys.stderr, "Creating %s data ..." 
% desc - st = time() - size, stream = make_memory_file(self.large_data_size_bytes, randomize) - elapsed = time() - st - print >> sys.stderr, "Done (in %f s)" % elapsed - - # writing - due to the compression it will seem faster than it is - st = time() - binsha = ldb.store(IStream('blob', size, stream)).binsha - elapsed_add = time() - st - assert ldb.has_object(binsha) - db_file = ldb.readable_db_object_path(bin_to_hex(binsha)) - fsize_kib = os.path.getsize(db_file) / 1000 - - - size_kib = size / 1000 - print >> sys.stderr, "Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)" % (size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add) - - # reading all at once - st = time() - ostream = ldb.stream(binsha) - shadata = ostream.read() - elapsed_readall = time() - st - - stream.seek(0) - assert shadata == stream.getvalue() - print >> sys.stderr, "Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)" % (size_kib, desc, elapsed_readall, size_kib / elapsed_readall) - - - # reading in chunks of 1 MiB - cs = 512*1000 - chunks = list() - st = time() - ostream = ldb.stream(binsha) - while True: - data = ostream.read(cs) - chunks.append(data) - if len(data) < cs: - break - # END read in chunks - elapsed_readchunks = time() - st - - stream.seek(0) - assert ''.join(chunks) == stream.getvalue() - - cs_kib = cs / 1000 - print >> sys.stderr, "Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)" % (size_kib, desc, cs_kib, elapsed_readchunks, size_kib / elapsed_readchunks) - - # del db file so git has something to do - os.remove(db_file) - - # VS. CGIT - ########## - # CGIT ! Can using the cgit programs be faster ? - proc = rwrepo.git.hash_object('-w', '--stdin', as_process=True, istream=subprocess.PIPE) - - # write file - pump everything in at once to be a fast as possible - data = stream.getvalue() # cache it - st = time() - proc.stdin.write(data) - proc.stdin.close() - gitsha = proc.stdout.read().strip() - proc.wait() - gelapsed_add = time() - st - del(data) - assert gitsha == bin_to_hex(binsha) # we do it the same way, right ? - - # as its the same sha, we reuse our path - fsize_kib = os.path.getsize(db_file) / 1000 - print >> sys.stderr, "Added %i KiB (filesize = %i KiB) of %s data to using git-hash-object in %f s ( %f Write KiB / s)" % (size_kib, fsize_kib, desc, gelapsed_add, size_kib / gelapsed_add) - - # compare ... 
- print >> sys.stderr, "Git-Python is %f %% faster than git when adding big %s files" % (100.0 - (elapsed_add / gelapsed_add) * 100, desc) - - - # read all - st = time() - s, t, size, data = rwrepo.git.get_object_data(gitsha) - gelapsed_readall = time() - st - print >> sys.stderr, "Read %i KiB of %s data at once using git-cat-file in %f s ( %f Read KiB / s)" % (size_kib, desc, gelapsed_readall, size_kib / gelapsed_readall) - - # compare - print >> sys.stderr, "Git-Python is %f %% faster than git when reading big %sfiles" % (100.0 - (elapsed_readall / gelapsed_readall) * 100, desc) - - - # read chunks - st = time() - s, t, size, stream = rwrepo.git.stream_object_data(gitsha) - while True: - data = stream.read(cs) - if len(data) < cs: - break - # END read stream - gelapsed_readchunks = time() - st - print >> sys.stderr, "Read %i KiB of %s data in %i KiB chunks from git-cat-file in %f s ( %f Read KiB / s)" % (size_kib, desc, cs_kib, gelapsed_readchunks, size_kib / gelapsed_readchunks) - - # compare - print >> sys.stderr, "Git-Python is %f %% faster than git when reading big %s files in chunks" % (100.0 - (elapsed_readchunks / gelapsed_readchunks) * 100, desc) - # END for each randomization factor - - @with_rw_directory - def test_large_data_streaming(self, path): - ldb = PureLooseObjectODB(path) - string_ios = list() # list of streams we previously created - - # serial mode - for randomize in range(2): - desc = (randomize and 'random ') or '' - print >> sys.stderr, "Creating %s data ..." % desc - st = time() - size, stream = make_memory_file(self.large_data_size_bytes, randomize) - elapsed = time() - st - print >> sys.stderr, "Done (in %f s)" % elapsed - string_ios.append(stream) - - # writing - due to the compression it will seem faster than it is - st = time() - sha = ldb.store(IStream('blob', size, stream)).binsha - elapsed_add = time() - st - assert ldb.has_object(sha) - db_file = ldb.readable_db_object_path(bin_to_hex(sha)) - fsize_kib = os.path.getsize(db_file) / 1000 - - - size_kib = size / 1000 - print >> sys.stderr, "Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)" % (size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add) - - # reading all at once - st = time() - ostream = ldb.stream(sha) - shadata = ostream.read() - elapsed_readall = time() - st - - stream.seek(0) - assert shadata == stream.getvalue() - print >> sys.stderr, "Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)" % (size_kib, desc, elapsed_readall, size_kib / elapsed_readall) - - - # reading in chunks of 1 MiB - cs = 512*1000 - chunks = list() - st = time() - ostream = ldb.stream(sha) - while True: - data = ostream.read(cs) - chunks.append(data) - if len(data) < cs: - break - # END read in chunks - elapsed_readchunks = time() - st - - stream.seek(0) - assert ''.join(chunks) == stream.getvalue() - - cs_kib = cs / 1000 - print >> sys.stderr, "Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)" % (size_kib, desc, cs_kib, elapsed_readchunks, size_kib / elapsed_readchunks) - - # del db file so we keep something to do - os.remove(db_file) - # END for each randomization factor - - - # multi-threaded mode - # want two, should be supported by most of todays cpus - pool.set_size(2) - total_kib = 0 - nsios = len(string_ios) - for stream in string_ios: - stream.seek(0) - total_kib += len(stream.getvalue()) / 1000 - # END rewind - - def istream_iter(): - for stream in string_ios: - stream.seek(0) - yield IStream(str_blob_type, 
len(stream.getvalue()), stream) - # END for each stream - # END util - - # write multiple objects at once, involving concurrent compression - reader = IteratorReader(istream_iter()) - istream_reader = ldb.store_async(reader) - istream_reader.task().max_chunksize = 1 - - st = time() - istreams = istream_reader.read(nsios) - assert len(istreams) == nsios - elapsed = time() - st - - print >> sys.stderr, "Threads(%i): Compressed %i KiB of data in loose odb in %f s ( %f Write KiB / s)" % (pool.size(), total_kib, elapsed, total_kib / elapsed) - - # decompress multiple at once, by reading them - # chunk size is not important as the stream will not really be decompressed - - # until its read - istream_reader = IteratorReader(iter([ i.binsha for i in istreams ])) - ostream_reader = ldb.stream_async(istream_reader) - - chunk_task = TestStreamReader(ostream_reader, "chunker", None) - output_reader = pool.add_task(chunk_task) - output_reader.task().max_chunksize = 1 - - st = time() - assert len(output_reader.read(nsios)) == nsios - elapsed = time() - st - - print >> sys.stderr, "Threads(%i): Decompressed %i KiB of data in loose odb in %f s ( %f Read KiB / s)" % (pool.size(), total_kib, elapsed, total_kib / elapsed) - - # store the files, and read them back. For the reading, we use a task - # as well which is chunked into one item per task. Reading all will - # very quickly result in two threads handling two bytestreams of - # chained compression/decompression streams - reader = IteratorReader(istream_iter()) - istream_reader = ldb.store_async(reader) - istream_reader.task().max_chunksize = 1 - - istream_to_sha = lambda items: [ i.binsha for i in items ] - istream_reader.set_post_cb(istream_to_sha) - - ostream_reader = ldb.stream_async(istream_reader) - - chunk_task = TestStreamReader(ostream_reader, "chunker", None) - output_reader = pool.add_task(chunk_task) - output_reader.max_chunksize = 1 - - st = time() - assert len(output_reader.read(nsios)) == nsios - elapsed = time() - st - - print >> sys.stderr, "Threads(%i): Compressed and decompressed and read %i KiB of data in loose odb in %f s ( %f Combined KiB / s)" % (pool.size(), total_kib, elapsed, total_kib / elapsed) -- cgit v1.2.3 From 79b9dbd63f3b1ec3f1ae9269ff44f9c07eeab9fa Mon Sep 17 00:00:00 2001 From: Sebastian Thiel Date: Mon, 6 Jun 2011 19:56:17 +0200 Subject: Fixed performance commit object testing --- git/test/performance/lib.py | 2 +- git/test/performance/test_commit.py | 15 ++++++++------- 2 files changed, 9 insertions(+), 8 deletions(-) (limited to 'git/test/performance') diff --git a/git/test/performance/lib.py b/git/test/performance/lib.py index c92b0eea..758d402d 100644 --- a/git/test/performance/lib.py +++ b/git/test/performance/lib.py @@ -65,7 +65,7 @@ class TestBigRepoRW(TestBigRepoR): super(TestBigRepoRW, cls).setUpAll() dirname = tempfile.mktemp() os.mkdir(dirname) - cls.rwrepo = cls.gitrorepo.clone(dirname, shared=True, bare=True) + cls.rwrepo = cls.rorepo.clone(dirname, shared=True, bare=True) @classmethod def tearDownAll(cls): diff --git a/git/test/performance/test_commit.py b/git/test/performance/test_commit.py index 72755e05..decc308e 100644 --- a/git/test/performance/test_commit.py +++ b/git/test/performance/test_commit.py @@ -6,16 +6,16 @@ from lib import * from git import * -from git import IStream -from git.test.test_commit import assert_commit_serialization +from git.base import IStream +from git.test.objects.test_commit import assert_commit_serialization from cStringIO import StringIO from time import time import sys 
class TestPerformance(TestBigRepoRW): - + # ref with about 100 commits in its history - ref_100 = '0.1.6' + ref_100 = 'v0.99' def _query_commit_info(self, c): c.author @@ -45,13 +45,14 @@ class TestPerformance(TestBigRepoRW): # END for each object # END for each commit elapsed_time = time() - st + assert no, "Should have traversed a few objects" print >> sys.stderr, "Traversed %i Trees and a total of %i unchached objects in %s [s] ( %f objs/s )" % (nc, no, elapsed_time, no/elapsed_time) def test_commit_traversal(self): # bound to cat-file parsing performance nc = 0 st = time() - for c in self.gitrorepo.commit(self.head_sha_2k).traverse(branch_first=False): + for c in self.rorepo.commit(self.head_sha_2k).traverse(branch_first=False): nc += 1 self._query_commit_info(c) # END for each traversed commit @@ -62,7 +63,7 @@ class TestPerformance(TestBigRepoRW): # bound to stream parsing performance nc = 0 st = time() - for c in Commit.iter_items(self.gitrorepo, self.head_sha_2k): + for c in Commit.iter_items(self.rorepo, self.head_sha_2k): nc += 1 self._query_commit_info(c) # END for each traversed commit @@ -73,7 +74,7 @@ class TestPerformance(TestBigRepoRW): assert_commit_serialization(self.rwrepo, self.head_sha_2k, True) rwrepo = self.rwrepo - make_object = rwrepo.odb.store + make_object = rwrepo.store # direct serialization - deserialization can be tested afterwards # serialization is probably limited on IO hc = rwrepo.commit(self.head_sha_2k) -- cgit v1.2.3 From 56a004bb8a08c216fe9764be866e6c00f079e257 Mon Sep 17 00:00:00 2001 From: Sebastian Thiel Date: Mon, 6 Jun 2011 20:00:07 +0200 Subject: Moved performance commit object test into the respective subfolder to indicate what it tests --- git/test/performance/objects/__init__.py | 1 + git/test/performance/objects/test_commit.py | 100 ++++++++++++++++++++++++++++ git/test/performance/test_commit.py | 100 ---------------------------- 3 files changed, 101 insertions(+), 100 deletions(-) create mode 100644 git/test/performance/objects/__init__.py create mode 100644 git/test/performance/objects/test_commit.py delete mode 100644 git/test/performance/test_commit.py (limited to 'git/test/performance') diff --git a/git/test/performance/objects/__init__.py b/git/test/performance/objects/__init__.py new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/git/test/performance/objects/__init__.py @@ -0,0 +1 @@ + diff --git a/git/test/performance/objects/test_commit.py b/git/test/performance/objects/test_commit.py new file mode 100644 index 00000000..685fba2f --- /dev/null +++ b/git/test/performance/objects/test_commit.py @@ -0,0 +1,100 @@ +# test_performance.py +# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors +# +# This module is part of GitPython and is released under +# the BSD License: http://www.opensource.org/licenses/bsd-license.php + +from git.test.performance.lib import TestBigRepoRW +from git import * +from git.base import IStream +from git.test.objects.test_commit import assert_commit_serialization +from cStringIO import StringIO +from time import time +import sys + +class TestPerformance(TestBigRepoRW): + + # ref with about 100 commits in its history + ref_100 = 'v0.99' + + def _query_commit_info(self, c): + c.author + c.authored_date + c.author_tz_offset + c.committer + c.committed_date + c.committer_tz_offset + c.message + c.parents + + def test_iteration(self): + no = 0 + nc = 0 + + # find the first commit containing the given path - always do a full + # iteration ( restricted to the path in question ), 
but in fact it should + # return quite a lot of commits, we just take one and hence abort the operation + + st = time() + for c in self.rorepo.iter_commits(self.ref_100): + nc += 1 + self._query_commit_info(c) + for obj in c.tree.traverse(): + obj.size + no += 1 + # END for each object + # END for each commit + elapsed_time = time() - st + assert no, "Should have traversed a few objects" + print >> sys.stderr, "Traversed %i Trees and a total of %i unchached objects in %s [s] ( %f objs/s )" % (nc, no, elapsed_time, no/elapsed_time) + + def test_commit_traversal(self): + # bound to cat-file parsing performance + nc = 0 + st = time() + for c in self.rorepo.commit(self.head_sha_2k).traverse(branch_first=False): + nc += 1 + self._query_commit_info(c) + # END for each traversed commit + elapsed_time = time() - st + print >> sys.stderr, "Traversed %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc/elapsed_time) + + def test_commit_iteration(self): + # bound to stream parsing performance + nc = 0 + st = time() + for c in Commit.iter_items(self.rorepo, self.head_sha_2k): + nc += 1 + self._query_commit_info(c) + # END for each traversed commit + elapsed_time = time() - st + print >> sys.stderr, "Iterated %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc/elapsed_time) + + def test_commit_serialization(self): + assert_commit_serialization(self.rwrepo, self.head_sha_2k, True) + + rwrepo = self.rwrepo + make_object = rwrepo.store + # direct serialization - deserialization can be tested afterwards + # serialization is probably limited on IO + hc = rwrepo.commit(self.head_sha_2k) + + commits = list() + nc = 5000 + st = time() + for i in xrange(nc): + cm = Commit( rwrepo, Commit.NULL_BIN_SHA, hc.tree, + hc.author, hc.authored_date, hc.author_tz_offset, + hc.committer, hc.committed_date, hc.committer_tz_offset, + str(i), parents=hc.parents, encoding=hc.encoding) + + stream = StringIO() + cm._serialize(stream) + slen = stream.tell() + stream.seek(0) + + cm.binsha = make_object(IStream(Commit.type, slen, stream)).binsha + # END commit creation + elapsed = time() - st + + print >> sys.stderr, "Serialized %i commits to loose objects in %f s ( %f commits / s )" % (nc, elapsed, nc / elapsed) diff --git a/git/test/performance/test_commit.py b/git/test/performance/test_commit.py deleted file mode 100644 index decc308e..00000000 --- a/git/test/performance/test_commit.py +++ /dev/null @@ -1,100 +0,0 @@ -# test_performance.py -# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors -# -# This module is part of GitPython and is released under -# the BSD License: http://www.opensource.org/licenses/bsd-license.php - -from lib import * -from git import * -from git.base import IStream -from git.test.objects.test_commit import assert_commit_serialization -from cStringIO import StringIO -from time import time -import sys - -class TestPerformance(TestBigRepoRW): - - # ref with about 100 commits in its history - ref_100 = 'v0.99' - - def _query_commit_info(self, c): - c.author - c.authored_date - c.author_tz_offset - c.committer - c.committed_date - c.committer_tz_offset - c.message - c.parents - - def test_iteration(self): - no = 0 - nc = 0 - - # find the first commit containing the given path - always do a full - # iteration ( restricted to the path in question ), but in fact it should - # return quite a lot of commits, we just take one and hence abort the operation - - st = time() - for c in self.rorepo.iter_commits(self.ref_100): - nc += 1 - self._query_commit_info(c) - for obj 
in c.tree.traverse(): - obj.size - no += 1 - # END for each object - # END for each commit - elapsed_time = time() - st - assert no, "Should have traversed a few objects" - print >> sys.stderr, "Traversed %i Trees and a total of %i unchached objects in %s [s] ( %f objs/s )" % (nc, no, elapsed_time, no/elapsed_time) - - def test_commit_traversal(self): - # bound to cat-file parsing performance - nc = 0 - st = time() - for c in self.rorepo.commit(self.head_sha_2k).traverse(branch_first=False): - nc += 1 - self._query_commit_info(c) - # END for each traversed commit - elapsed_time = time() - st - print >> sys.stderr, "Traversed %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc/elapsed_time) - - def test_commit_iteration(self): - # bound to stream parsing performance - nc = 0 - st = time() - for c in Commit.iter_items(self.rorepo, self.head_sha_2k): - nc += 1 - self._query_commit_info(c) - # END for each traversed commit - elapsed_time = time() - st - print >> sys.stderr, "Iterated %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc/elapsed_time) - - def test_commit_serialization(self): - assert_commit_serialization(self.rwrepo, self.head_sha_2k, True) - - rwrepo = self.rwrepo - make_object = rwrepo.store - # direct serialization - deserialization can be tested afterwards - # serialization is probably limited on IO - hc = rwrepo.commit(self.head_sha_2k) - - commits = list() - nc = 5000 - st = time() - for i in xrange(nc): - cm = Commit( rwrepo, Commit.NULL_BIN_SHA, hc.tree, - hc.author, hc.authored_date, hc.author_tz_offset, - hc.committer, hc.committed_date, hc.committer_tz_offset, - str(i), parents=hc.parents, encoding=hc.encoding) - - stream = StringIO() - cm._serialize(stream) - slen = stream.tell() - stream.seek(0) - - cm.binsha = make_object(IStream(Commit.type, slen, stream)).binsha - # END commit creation - elapsed = time() - st - - print >> sys.stderr, "Serialized %i commits to loose objects in %f s ( %f commits / s )" % (nc, elapsed, nc / elapsed) -- cgit v1.2.3 From ce79835556c195ed6e638a33280f729537dcee54 Mon Sep 17 00:00:00 2001 From: Sebastian Thiel Date: Mon, 6 Jun 2011 20:12:48 +0200 Subject: Fixed performance pack writing tests. 
As they are actually depent on the database (as streams have to be decompressed, it should be redesigned to have multiple database implementations) --- git/test/performance/db/test_packedodb_pure.py | 76 ++++++++++++++++++++++++ git/test/performance/test_pack_streaming.py | 80 -------------------------- 2 files changed, 76 insertions(+), 80 deletions(-) delete mode 100644 git/test/performance/test_pack_streaming.py (limited to 'git/test/performance') diff --git a/git/test/performance/db/test_packedodb_pure.py b/git/test/performance/db/test_packedodb_pure.py index 7b9f2930..f254c518 100644 --- a/git/test/performance/db/test_packedodb_pure.py +++ b/git/test/performance/db/test_packedodb_pure.py @@ -1,7 +1,83 @@ +# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors +# +# This module is part of GitDB and is released under +# the New BSD License: http://www.opensource.org/licenses/bsd-license.php from packedodb_impl import TestPurePackedODBPerformanceBase from git.db.py.pack import PurePackedODB +from git.stream import NullStream + +from git.pack import PackEntity + +import os +import sys + +from time import time +from nose import SkipTest + + +class CountedNullStream(NullStream): + __slots__ = '_bw' + def __init__(self): + self._bw = 0 + + def bytes_written(self): + return self._bw + + def write(self, d): + self._bw += NullStream.write(self, d) + + class TestPurePackedODB(TestPurePackedODBPerformanceBase): #{ Configuration PackedODBCls = PurePackedODB #} END configuration + + def test_pack_writing(self): + # see how fast we can write a pack from object streams. + # This will not be fast, as we take time for decompressing the streams as well + ostream = CountedNullStream() + pdb = self.ropdb + + ni = 5000 + count = 0 + total_size = 0 + st = time() + objs = list() + for sha in pdb.sha_iter(): + count += 1 + objs.append(pdb.stream(sha)) + if count == ni: + break + #END gather objects for pack-writing + elapsed = time() - st + print >> sys.stderr, "PDB Streaming: Got %i streams by sha in in %f s ( %f streams/s )" % (ni, elapsed, ni / elapsed) + + st = time() + PackEntity.write_pack(objs, ostream.write) + elapsed = time() - st + total_kb = ostream.bytes_written() / 1000 + print >> sys.stderr, "PDB Streaming: Wrote pack of size %i kb in %f s (%f kb/s)" % (total_kb, elapsed, total_kb/elapsed) + + + def test_stream_reading(self): + raise SkipTest("This test was only used for --with-profile runs") + pdb = self.ropdb + + # streaming only, meant for --with-profile runs + ni = 5000 + count = 0 + pdb_stream = pdb.stream + total_size = 0 + st = time() + for sha in pdb.sha_iter(): + if count == ni: + break + stream = pdb_stream(sha) + stream.read() + total_size += stream.size + count += 1 + elapsed = time() - st + total_kib = total_size / 1000 + print >> sys.stderr, "PDB Streaming: Got %i streams by sha and read all bytes totallying %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (ni, total_kib, total_kib/elapsed , elapsed, ni / elapsed) + diff --git a/git/test/performance/test_pack_streaming.py b/git/test/performance/test_pack_streaming.py deleted file mode 100644 index cc890ee0..00000000 --- a/git/test/performance/test_pack_streaming.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors -# -# This module is part of GitDB and is released under -# the New BSD License: http://www.opensource.org/licenses/bsd-license.php -"""Specific test for pack streams only""" -from lib import ( - TestBigRepoR - ) - -from git.db.pack 
import PackedDB -from git.stream import NullStream -from git.pack import PackEntity - -import os -import sys -from time import time -from nose import SkipTest - -class CountedNullStream(NullStream): - __slots__ = '_bw' - def __init__(self): - self._bw = 0 - - def bytes_written(self): - return self._bw - - def write(self, d): - self._bw += NullStream.write(self, d) - - -class TestPackStreamingPerformance(TestBigRepoR): - - def test_pack_writing(self): - # see how fast we can write a pack from object streams. - # This will not be fast, as we take time for decompressing the streams as well - ostream = CountedNullStream() - pdb = PackedDB(os.path.join(self.gitrepopath, "objects/pack")) - - ni = 5000 - count = 0 - total_size = 0 - st = time() - objs = list() - for sha in pdb.sha_iter(): - count += 1 - objs.append(pdb.stream(sha)) - if count == ni: - break - #END gather objects for pack-writing - elapsed = time() - st - print >> sys.stderr, "PDB Streaming: Got %i streams by sha in in %f s ( %f streams/s )" % (ni, elapsed, ni / elapsed) - - st = time() - PackEntity.write_pack(objs, ostream.write) - elapsed = time() - st - total_kb = ostream.bytes_written() / 1000 - print >> sys.stderr, "PDB Streaming: Wrote pack of size %i kb in %f s (%f kb/s)" % (total_kb, elapsed, total_kb/elapsed) - - - def test_stream_reading(self): - raise SkipTest() - pdb = PackedDB(os.path.join(self.gitrepopath, "objects/pack")) - - # streaming only, meant for --with-profile runs - ni = 5000 - count = 0 - pdb_stream = pdb.stream - total_size = 0 - st = time() - for sha in pdb.sha_iter(): - if count == ni: - break - stream = pdb_stream(sha) - stream.read() - total_size += stream.size - count += 1 - elapsed = time() - st - total_kib = total_size / 1000 - print >> sys.stderr, "PDB Streaming: Got %i streams by sha and read all bytes totallying %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (ni, total_kib, total_kib/elapsed , elapsed, ni / elapsed) - -- cgit v1.2.3 From 3c12de3762abcde33dd27151b49589da76c2132f Mon Sep 17 00:00:00 2001 From: Sebastian Thiel Date: Mon, 6 Jun 2011 20:46:53 +0200 Subject: Improved pack writing test to show that the pack generation can be lightning fast with nearly now overhead if the data streams in fast enough (~30 MB/s when writing a pack). This shows that there is huge potential for sending packs, considering that we are actually recompressing them (without deltification). To be faster in future, we could probably just send ref-deltas or full objects as found in the pack without doing any recompression. --- git/test/performance/db/test_packedodb_pure.py | 49 +++++++++++++++----------- 1 file changed, 28 insertions(+), 21 deletions(-) (limited to 'git/test/performance') diff --git a/git/test/performance/db/test_packedodb_pure.py b/git/test/performance/db/test_packedodb_pure.py index f254c518..4ea09779 100644 --- a/git/test/performance/db/test_packedodb_pure.py +++ b/git/test/performance/db/test_packedodb_pure.py @@ -33,31 +33,38 @@ class TestPurePackedODB(TestPurePackedODBPerformanceBase): PackedODBCls = PurePackedODB #} END configuration + def test_pack_writing_note(self): + sys.stderr.write("test_pack_writing should be adjusted to support different databases to read from - see test for more info") + raise SkipTest() + def test_pack_writing(self): # see how fast we can write a pack from object streams. 
# This will not be fast, as we take time for decompressing the streams as well
+		# For now we test the fast streaming and slow streaming versions manually
 		ostream = CountedNullStream()
-		pdb = self.ropdb
-
-		ni = 5000
-		count = 0
-		total_size = 0
-		st = time()
-		objs = list()
-		for sha in pdb.sha_iter():
-			count += 1
-			objs.append(pdb.stream(sha))
-			if count == ni:
-				break
-		#END gather objects for pack-writing
-		elapsed = time() - st
-		print >> sys.stderr, "PDB Streaming: Got %i streams by sha in in %f s ( %f streams/s )" % (ni, elapsed, ni / elapsed)
-
-		st = time()
-		PackEntity.write_pack(objs, ostream.write)
-		elapsed = time() - st
-		total_kb = ostream.bytes_written() / 1000
-		print >> sys.stderr, "PDB Streaming: Wrote pack of size %i kb in %f s (%f kb/s)" % (total_kb, elapsed, total_kb/elapsed)
+		# NOTE: We use the same repo twice to see whether OS caching helps
+		for rorepo in (self.rorepo, self.rorepo, self.ropdb):
+
+			ni = 5000
+			count = 0
+			total_size = 0
+			st = time()
+			objs = list()
+			for sha in rorepo.sha_iter():
+				count += 1
+				objs.append(rorepo.stream(sha))
+				if count == ni:
+					break
+			#END gather objects for pack-writing
+			elapsed = time() - st
+			print >> sys.stderr, "PDB Streaming: Got %i streams from %s by sha in in %f s ( %f streams/s )" % (ni, rorepo.__class__.__name__, elapsed, ni / elapsed)
+
+			st = time()
+			PackEntity.write_pack(objs, ostream.write)
+			elapsed = time() - st
+			total_kb = ostream.bytes_written() / 1000
+			print >> sys.stderr, "PDB Streaming: Wrote pack of size %i kb in %f s (%f kb/s)" % (total_kb, elapsed, total_kb/elapsed)
+		#END for each rorepo
 
 	def test_stream_reading(self):
-- cgit v1.2.3
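
The pack-writing benchmarks above all rely on the same trick: a null sink that counts the bytes handed to it, so pack generation throughput can be measured without touching the disk. The sketch below isolates that pattern. It is intentionally self-contained and does not import gitdb; fake_write_pack and the synthetic BytesIO streams are hypothetical stand-ins for PackEntity.write_pack and the object streams returned by PackedDB.stream, so only the byte counting and timing mirror the tests.

import sys
import zlib
from io import BytesIO
from time import time


class CountedNullStream(object):
    """Discard everything written to it, but remember how many bytes passed through."""
    def __init__(self):
        self._bw = 0

    def bytes_written(self):
        return self._bw

    def write(self, d):
        # count and drop the data instead of writing it anywhere
        self._bw += len(d)
        return len(d)


def time_pack_write(write_pack, streams):
    """Time write_pack(streams, sink.write) against a counting null sink and
    report throughput to stderr, mirroring the reporting style of the tests."""
    sink = CountedNullStream()
    st = time()
    write_pack(streams, sink.write)
    elapsed = max(time() - st, 1e-9)  # guard against a zero-length interval
    total_kb = sink.bytes_written() / 1000
    sys.stderr.write("wrote %i kb in %f s ( %f kb/s )\n" % (total_kb, elapsed, total_kb / elapsed))
    return sink.bytes_written(), elapsed


if __name__ == '__main__':
    # Hypothetical stand-in for PackEntity.write_pack: recompress each stream
    # into the sink so the measured time includes a compression cost.
    def fake_write_pack(streams, write):
        for stream in streams:
            write(zlib.compress(stream.read()))

    streams = [BytesIO(b'x' * 4096) for _ in range(5000)]
    time_pack_write(fake_write_pack, streams)

With gitdb available, the same sink plugs straight into the real call by passing sink.write to PackEntity.write_pack, which is what the tests above do; the reported kb/s then reflects recompression speed rather than disk throughput.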