Diffstat (limited to 'git/test/performance')
-rw-r--r--  git/test/performance/__init__.py                           2
-rw-r--r--  git/test/performance/db/__init__.py                        1
-rw-r--r--  git/test/performance/db/looseodb_impl.py                 132
-rw-r--r--  git/test/performance/db/odb_impl.py                       72
-rw-r--r--  git/test/performance/db/packedodb_impl.py                107
-rw-r--r--  git/test/performance/db/test_looseodb_cmd.py              11
-rw-r--r--  git/test/performance/db/test_looseodb_pure.py              6
-rw-r--r--  git/test/performance/db/test_odb_cmd.py                    6
-rw-r--r--  git/test/performance/db/test_odb_pure.py                   6
-rw-r--r--  git/test/performance/db/test_packedodb_pure.py            90
-rw-r--r--  git/test/performance/lib.py                                38
-rw-r--r--  git/test/performance/objects/__init__.py                   1
-rw-r--r--  git/test/performance/objects/test_commit.py (renamed from git/test/performance/test_commit.py)  21
-rw-r--r--  git/test/performance/test_odb.py                           70
-rw-r--r--  git/test/performance/test_streams.py                      131
15 files changed, 462 insertions, 232 deletions
diff --git a/git/test/performance/__init__.py b/git/test/performance/__init__.py
new file mode 100644
index 00000000..6bd117b9
--- /dev/null
+++ b/git/test/performance/__init__.py
@@ -0,0 +1,2 @@
+"""Note: This init file makes the performance tests an integral part of the test suite
+as nose will now pick them up. Previously the init file was intentionally omitted"""
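+# A usage sketch (assumption; these are standard nose options, not part of this
+# change): run only the performance tests with
+#     nosetests git.test.performance
+# or exclude them from a quick run with
+#     nosetests -e performance git.test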
diff --git a/git/test/performance/db/__init__.py b/git/test/performance/db/__init__.py
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/git/test/performance/db/__init__.py
@@ -0,0 +1 @@
+
diff --git a/git/test/performance/db/looseodb_impl.py b/git/test/performance/db/looseodb_impl.py
new file mode 100644
index 00000000..6d3c1fa6
--- /dev/null
+++ b/git/test/performance/db/looseodb_impl.py
@@ -0,0 +1,132 @@
+"""Performance data streaming performance"""
+from git.db.py import *
+from git.base import *
+from git.stream import *
+from async import ChannelThreadTask
+from git.util import (
+    pool,
+    bin_to_hex
+    )
+import os
+import sys
+from time import time
+
+from git.test.lib import (
+    GlobalsItemDeletorMetaCls,
+    make_memory_file,
+    with_rw_repo
+    )
+
+from git.test.performance.lib import TestBigRepoR
+
+
+#{ Utilities
+
+# chunk_size is not imported or defined anywhere in this module; assume 1 MB
+# read chunks so the helper below is self-contained.
+chunk_size = 1000 * 1000
+
+def read_chunked_stream(stream):
+    total = 0
+    while True:
+        chunk = stream.read(chunk_size)
+        total += len(chunk)
+        if len(chunk) < chunk_size:
+            break
+    # END read stream loop
+    assert total == stream.size
+    return stream
+
+
+class TestStreamReader(ChannelThreadTask):
+    """Expects input streams and reads them in chunks. It will read one at a time,
+    requiring a queue chunk size of 1"""
+    def __init__(self, *args):
+        super(TestStreamReader, self).__init__(*args)
+        self.fun = read_chunked_stream
+        self.max_chunksize = 1
+
+
+#} END utilities
+
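+# Background on the pattern below (an assumption based on the name): the
+# GlobalsItemDeletorMetaCls metaclass deletes the class named in ModuleToDelete
+# from the module globals once a concrete subtype exists elsewhere, so that
+# nose collects only the configured subclasses, never the abstract base itself.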
+class PerfBaseDeletorMetaClass(GlobalsItemDeletorMetaCls):
+    ModuleToDelete = 'TestLooseDBWPerformanceBase'
+
+
+class TestLooseDBWPerformanceBase(TestBigRepoR):
+    __metaclass__ = PerfBaseDeletorMetaClass
+
+    large_data_size_bytes = 1000*1000*10    # 10 MB should do it
+    moderate_data_size_bytes = 1000*1000*1  # just 1 MB
+
+    #{ Configuration
+    LooseODBCls = None
+    #} END configuration
+
+    @classmethod
+    def setUpAll(cls):
+        super(TestLooseDBWPerformanceBase, cls).setUpAll()
+        if cls.LooseODBCls is None:
+            raise AssertionError("LooseODBCls must be set in subtype")
+        #END assert configuration
+        # currently there is no additional configuration
+
+    @with_rw_repo("HEAD")
+    def test_large_data_streaming(self, rwrepo):
+        # TODO: This part overlaps with the same file in git.test.performance.test_stream
+        # It should be shared if possible
+        objects_path = rwrepo.db_path('')
+        ldb = self.LooseODBCls(objects_path)
+
+        for randomize in range(2):
+            desc = (randomize and 'random ') or ''
+            print >> sys.stderr, "Creating %s data ..." % desc
+            st = time()
+            size, stream = make_memory_file(self.large_data_size_bytes, randomize)
+            elapsed = time() - st
+            print >> sys.stderr, "Done (in %f s)" % elapsed
+
+            # writing - due to the compression it will seem faster than it is
+            st = time()
+            binsha = ldb.store(IStream('blob', size, stream)).binsha
+            elapsed_add = time() - st
+            assert ldb.has_object(binsha)
+            hexsha = bin_to_hex(binsha)
+            db_file = os.path.join(objects_path, hexsha[:2], hexsha[2:])
+            fsize_kib = os.path.getsize(db_file) / 1000
+
+            size_kib = size / 1000
+            print >> sys.stderr, "%s: Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)" % (self.LooseODBCls.__name__, size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add)
+
+            # reading all at once
+            st = time()
+            ostream = ldb.stream(binsha)
+            shadata = ostream.read()
+            elapsed_readall = time() - st
+
+            stream.seek(0)
+            assert shadata == stream.getvalue()
+            print >> sys.stderr, "%s: Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)" % (self.LooseODBCls.__name__, size_kib, desc, elapsed_readall, size_kib / elapsed_readall)
+
+            # reading in chunks of 512 kB
+            cs = 512*1000
+            chunks = list()
+            st = time()
+            ostream = ldb.stream(binsha)
+            while True:
+                data = ostream.read(cs)
+                chunks.append(data)
+                if len(data) < cs:
+                    break
+            # END read in chunks
+            elapsed_readchunks = time() - st
+
+            stream.seek(0)
+            assert ''.join(chunks) == stream.getvalue()
+
+            cs_kib = cs / 1000
+            print >> sys.stderr, "%s: Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)" % (self.LooseODBCls.__name__, size_kib, desc, cs_kib, elapsed_readchunks, size_kib / elapsed_readchunks)
+
+            # remove the object file so each iteration starts from a clean db
+            os.remove(db_file)
+        # END for each randomization factor
+
+
diff --git a/git/test/performance/db/odb_impl.py b/git/test/performance/db/odb_impl.py
new file mode 100644
index 00000000..677cf6a8
--- /dev/null
+++ b/git/test/performance/db/odb_impl.py
@@ -0,0 +1,72 @@
+"""Performance tests for object store"""
+
+from time import time
+import sys
+
+from git.test.performance.lib import (
+    TestBigRepoR,
+    GlobalsItemDeletorMetaCls
+    )
+
+class PerfBaseDeletorMetaClass(GlobalsItemDeletorMetaCls):
+    ModuleToDelete = 'TestObjDBPerformanceBase'
+
+
+class TestObjDBPerformanceBase(TestBigRepoR):
+    __metaclass__ = PerfBaseDeletorMetaClass
+
+    #{ Configuration
+    RepoCls = None    # to be set by subclass
+    #} END configuration
+
+    def test_random_access(self):
+        repo = self.rorepo
+
+        # GET COMMITS
+        st = time()
+        root_commit = repo.commit(self.head_sha_2k)
+        commits = list(root_commit.traverse())
+        nc = len(commits)
+        elapsed = time() - st
+
+        print >> sys.stderr, "%s: Retrieved %i commits from ObjectStore in %g s ( %f commits / s )" % (type(repo.odb), nc, elapsed, nc / elapsed)
+
+        # GET TREES
+        # walk all trees of all commits
+        st = time()
+        blobs_per_commit = list()
+        nt = 0
+        for commit in commits:
+            tree = commit.tree
+            blobs = list()
+            for item in tree.traverse():
+                nt += 1
+                if item.type == 'blob':
+                    blobs.append(item)
+                # direct access for speed
+            # END while trees are there for walking
+            blobs_per_commit.append(blobs)
+        # END for each commit
+        elapsed = time() - st
+
+        print >> sys.stderr, "%s: Retrieved %i objects from %i commits in %g s ( %f objects / s )" % (type(repo.odb), nt, len(commits), elapsed, nt / elapsed)
+
+        # GET BLOBS
+        st = time()
+        nb = 0
+        too_many = 15000
+        data_bytes = 0
+        for blob_list in blobs_per_commit:
+            for blob in blob_list:
+                data_bytes += len(blob.data_stream.read())
+            # END for each blob
+            nb += len(blob_list)
+            if nb > too_many:
+                break
+        # END for each blob list
+        elapsed = time() - st
+
+        print >> sys.stderr, "%s: Retrieved %i blobs (%i KiB) and their data in %g s ( %f blobs / s, %f KiB / s )" % (type(repo.odb), nb, data_bytes/1000, elapsed, nb / elapsed, (data_bytes / 1000) / elapsed)
+
+
diff --git a/git/test/performance/db/packedodb_impl.py b/git/test/performance/db/packedodb_impl.py
new file mode 100644
index 00000000..b95a8d13
--- /dev/null
+++ b/git/test/performance/db/packedodb_impl.py
@@ -0,0 +1,107 @@
+# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
+#
+# This module is part of GitDB and is released under
+# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
+"""Performance tests for object store"""
+from git.test.performance.lib import (
+    TestBigRepoR,
+    GlobalsItemDeletorMetaCls
+    )
+
+from git.exc import UnsupportedOperation
+
+import sys
+from time import time
+
+
+class PerfBaseDeletorMetaClass(GlobalsItemDeletorMetaCls):
+    ModuleToDelete = 'TestPurePackedODBPerformanceBase'
+
+class TestPurePackedODBPerformanceBase(TestBigRepoR):
+    __metaclass__ = PerfBaseDeletorMetaClass
+
+    #{ Configuration
+    PackedODBCls = None
+    #} END configuration
+
+    @classmethod
+    def setUpAll(cls):
+        super(TestPurePackedODBPerformanceBase, cls).setUpAll()
+        if cls.PackedODBCls is None:
+            raise AssertionError("PackedODBCls must be set in subclass")
+        #END assert configuration
+        cls.ropdb = cls.PackedODBCls(cls.rorepo.db_path("pack"))
+
+    def test_pack_random_access(self):
+        pdb = self.ropdb
+
+        # sha lookup
+        st = time()
+        sha_list = list(pdb.sha_iter())
+        elapsed = time() - st
+        ns = len(sha_list)
+        print >> sys.stderr, "PDB: looked up %i shas by index in %f s ( %f shas/s )" % (ns, elapsed, ns / elapsed)
+
+        # pack-info lookup for each sha, in index order
+        pdb_pack_info = pdb._pack_info
+        st = time()
+        for sha in sha_list:
+            pdb_pack_info(sha)
+        # END for each sha to look up
+        elapsed = time() - st
+
+        # discard cache
+        del(pdb._entities)
+        pdb.entities()
+        print >> sys.stderr, "PDB: looked up %i shas in %i packs in %f s ( %f shas/s )" % (ns, len(pdb.entities()), elapsed, ns / elapsed)
+
+        # query info and streams only
+        max_items = 10000    # can wait longer when testing memory
+        for pdb_fun in (pdb.info, pdb.stream):
+            st = time()
+            for sha in sha_list[:max_items]:
+                pdb_fun(sha)
+            elapsed = time() - st
+            print >> sys.stderr, "PDB: Obtained %i object %ss by sha in %f s ( %f items/s )" % (max_items, pdb_fun.__name__, elapsed, max_items / elapsed)
+        # END for each function
+
+        # retrieve stream and read all
+        max_items = 5000
+        pdb_stream = pdb.stream
+        total_size = 0
+        st = time()
+        for sha in sha_list[:max_items]:
+            stream = pdb_stream(sha)
+            stream.read()
+            total_size += stream.size
+        elapsed = time() - st
+        total_kib = total_size / 1000
+        print >> sys.stderr, "PDB: Obtained %i streams by sha and read all bytes, totalling %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (max_items, total_kib, total_kib / elapsed, elapsed, max_items / elapsed)
+
+    def test_correctness(self):
+        pdb = self.ropdb
+        # NOTE: verifying everything in a big repository takes a long time; this
+        # check used to be disabled once it worked reliably
+        print >> sys.stderr, "Endurance run: verify streaming of objects (crc and sha)"
+        for crc in range(2):
+            count = 0
+            st = time()
+            for entity in pdb.entities():
+                pack_verify = entity.is_valid_stream
+                sha_by_index = entity.index().sha
+                for index in xrange(entity.index().size()):
+                    try:
+                        assert pack_verify(sha_by_index(index), use_crc=crc)
+                        count += 1
+                    except UnsupportedOperation:
+                        pass
+                    # END ignore old indices
+                # END for each index
+            # END for each entity
+            elapsed = time() - st
+            print >> sys.stderr, "PDB: verified %i objects (crc=%i) in %f s ( %f objects/s )" % (count, crc, elapsed, count / elapsed)
+        # END for each verify mode
+
diff --git a/git/test/performance/db/test_looseodb_cmd.py b/git/test/performance/db/test_looseodb_cmd.py
new file mode 100644
index 00000000..9738278c
--- /dev/null
+++ b/git/test/performance/db/test_looseodb_cmd.py
@@ -0,0 +1,11 @@
+from git.db.complex import CmdCompatibilityGitDB
+from looseodb_impl import TestLooseDBWPerformanceBase
+
+import sys
+
+class TestCmdLooseDB(TestLooseDBWPerformanceBase):
+    LooseODBCls = CmdCompatibilityGitDB
+
+    def test_info(self):
+        sys.stderr.write("This test does not check the write performance of the git command, as it is implemented in pure python\n")
+
diff --git a/git/test/performance/db/test_looseodb_pure.py b/git/test/performance/db/test_looseodb_pure.py
new file mode 100644
index 00000000..46f39d5e
--- /dev/null
+++ b/git/test/performance/db/test_looseodb_pure.py
@@ -0,0 +1,6 @@
+from git.db.py.loose import PureLooseObjectODB
+from looseodb_impl import TestLooseDBWPerformanceBase
+
+class TestPureLooseDB(TestLooseDBWPerformanceBase):
+    LooseODBCls = PureLooseObjectODB
+
diff --git a/git/test/performance/db/test_odb_cmd.py b/git/test/performance/db/test_odb_cmd.py
new file mode 100644
index 00000000..acd55cc9
--- /dev/null
+++ b/git/test/performance/db/test_odb_cmd.py
@@ -0,0 +1,6 @@
+from git.db.complex import CmdCompatibilityGitDB
+from odb_impl import TestObjDBPerformanceBase
+
+class TestCmdDB(TestObjDBPerformanceBase):
+    RepoCls = CmdCompatibilityGitDB
+
diff --git a/git/test/performance/db/test_odb_pure.py b/git/test/performance/db/test_odb_pure.py
new file mode 100644
index 00000000..6ed3585d
--- /dev/null
+++ b/git/test/performance/db/test_odb_pure.py
@@ -0,0 +1,6 @@
+from git.db.complex import PureCompatibilityGitDB
+from odb_impl import TestObjDBPerformanceBase
+
+class TestPureDB(TestObjDBPerformanceBase):
+    RepoCls = PureCompatibilityGitDB
+
diff --git a/git/test/performance/db/test_packedodb_pure.py b/git/test/performance/db/test_packedodb_pure.py
new file mode 100644
index 00000000..4ea09779
--- /dev/null
+++ b/git/test/performance/db/test_packedodb_pure.py
@@ -0,0 +1,90 @@
+# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
+#
+# This module is part of GitDB and is released under
+# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
+from packedodb_impl import TestPurePackedODBPerformanceBase
+from git.db.py.pack import PurePackedODB
+
+from git.stream import NullStream
+
+from git.pack import PackEntity
+
+import sys
+
+from time import time
+from nose import SkipTest
+
+
+class CountedNullStream(NullStream):
+    __slots__ = '_bw'
+
+    def __init__(self):
+        self._bw = 0
+
+    def bytes_written(self):
+        return self._bw
+
+    def write(self, d):
+        self._bw += NullStream.write(self, d)
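+# Usage sketch (mirroring test_pack_writing below): the counted stream's write
+# method is passed to PackEntity.write_pack as the output sink, so the pack
+# bytes are discarded while their total size is still recorded:
+#
+#     ostream = CountedNullStream()
+#     PackEntity.write_pack(objs, ostream.write)
+#     total_kb = ostream.bytes_written() / 1000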
+
+
+class TestPurePackedODB(TestPurePackedODBPerformanceBase):
+    #{ Configuration
+    PackedODBCls = PurePackedODB
+    #} END configuration
+
+    def test_pack_writing_note(self):
+        sys.stderr.write("test_pack_writing should be adjusted to support different databases to read from - see test for more info\n")
+        raise SkipTest()
+
+    def test_pack_writing(self):
+        # see how fast we can write a pack from object streams.
+        # This will not be fast, as we spend time decompressing the streams as well.
+        # For now we test the fast streaming and slow streaming versions manually
+        ostream = CountedNullStream()
+        # NOTE: We use the same repo twice to see whether OS caching helps
+        for rorepo in (self.rorepo, self.rorepo, self.ropdb):
+
+            ni = 5000
+            count = 0
+            total_size = 0
+            st = time()
+            objs = list()
+            for sha in rorepo.sha_iter():
+                count += 1
+                objs.append(rorepo.stream(sha))
+                if count == ni:
+                    break
+            #END gather objects for pack-writing
+            elapsed = time() - st
+            print >> sys.stderr, "PDB Streaming: Got %i streams from %s by sha in %f s ( %f streams/s )" % (ni, rorepo.__class__.__name__, elapsed, ni / elapsed)
+
+            st = time()
+            PackEntity.write_pack(objs, ostream.write)
+            elapsed = time() - st
+            total_kb = ostream.bytes_written() / 1000
+            print >> sys.stderr, "PDB Streaming: Wrote pack of size %i kB in %f s (%f kB/s)" % (total_kb, elapsed, total_kb / elapsed)
+        #END for each rorepo
+
+
+    def test_stream_reading(self):
+        raise SkipTest("This test was only used for --with-profile runs")
+        pdb = self.ropdb
+
+        # streaming only, meant for --with-profile runs
+        ni = 5000
+        count = 0
+        pdb_stream = pdb.stream
+        total_size = 0
+        st = time()
+        for sha in pdb.sha_iter():
+            if count == ni:
+                break
+            stream = pdb_stream(sha)
+            stream.read()
+            total_size += stream.size
+            count += 1
+        elapsed = time() - st
+        total_kib = total_size / 1000
+        print >> sys.stderr, "PDB Streaming: Got %i streams by sha and read all bytes, totalling %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (ni, total_kib, total_kib / elapsed, elapsed, ni / elapsed)
+
diff --git a/git/test/performance/lib.py b/git/test/performance/lib.py
index d0727b60..758d402d 100644
--- a/git/test/performance/lib.py
+++ b/git/test/performance/lib.py
@@ -1,17 +1,13 @@
"""Contains library functions"""
import os
-from git.test.lib import *
+from git.test.lib import (
+    TestBase,
+    GlobalsItemDeletorMetaCls
+    )
import shutil
import tempfile
-from git.db import (
-    GitCmdObjectDB,
-    GitDB
-    )
-
-from git import (
-    Repo
-    )
+from git import Repo
#{ Invariants
k_env_git_repo = "GIT_PYTHON_TEST_GIT_REPO_BASE"
@@ -38,11 +34,7 @@ class TestBigRepoR(TestBase):
    * gitrorepo
-      * Read-Only git repository - actually the repo of git itself
-
-    * puregitrorepo
-
-      * As gitrepo, but uses pure python implementation
+      * a big read-only git repository
    """
    #{ Invariants
@@ -50,29 +42,33 @@ class TestBigRepoR(TestBase):
    head_sha_50 = '32347c375250fd470973a5d76185cac718955fd5'
    #} END invariants
+    #{ Configuration
+    RepoCls = Repo
+    #} END configuration
+
    @classmethod
    def setUpAll(cls):
        super(TestBigRepoR, cls).setUpAll()
-        repo_path = resolve_or_fail(k_env_git_repo)
-        cls.gitrorepo = Repo(repo_path, odbt=GitCmdObjectDB)
-        cls.puregitrorepo = Repo(repo_path, odbt=GitDB)
+        if cls.RepoCls is None:
+            raise AssertionError("RepoCls must be set in subclass %s" % cls)
+        #END assert configuration
+        cls.rorepo = cls.RepoCls(resolve_or_fail(k_env_git_repo))
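+        # Subtypes pick the implementation under test by overriding RepoCls,
+        # as the db tests in this changeset do, for example:
+        #     class TestCmdDB(TestObjDBPerformanceBase):
+        #         RepoCls = CmdCompatibilityGitDB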
class TestBigRepoRW(TestBigRepoR):
    """As above, but provides a big repository that we can write to.
-    Provides ``self.gitrwrepo`` and ``self.puregitrwrepo``"""
+    Provides ``self.rwrepo``"""
    @classmethod
    def setUpAll(cls):
        super(TestBigRepoRW, cls).setUpAll()
        dirname = tempfile.mktemp()
        os.mkdir(dirname)
-        cls.gitrwrepo = cls.gitrorepo.clone(dirname, shared=True, bare=True, odbt=GitCmdObjectDB)
-        cls.puregitrwrepo = Repo(dirname, odbt=GitDB)
+        cls.rwrepo = cls.rorepo.clone(dirname, shared=True, bare=True)
    @classmethod
    def tearDownAll(cls):
-        shutil.rmtree(cls.gitrwrepo.working_dir)
+        shutil.rmtree(cls.rwrepo.working_dir)
#} END base classes
diff --git a/git/test/performance/objects/__init__.py b/git/test/performance/objects/__init__.py
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/git/test/performance/objects/__init__.py
@@ -0,0 +1 @@
+
diff --git a/git/test/performance/test_commit.py b/git/test/performance/objects/test_commit.py
index 80421aa2..685fba2f 100644
--- a/git/test/performance/test_commit.py
+++ b/git/test/performance/objects/test_commit.py
@@ -4,18 +4,18 @@
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
-from lib import *
+from git.test.performance.lib import TestBigRepoRW
from git import *
-from gitdb import IStream
-from git.test.test_commit import assert_commit_serialization
+from git.base import IStream
+from git.test.objects.test_commit import assert_commit_serialization
from cStringIO import StringIO
from time import time
import sys
class TestPerformance(TestBigRepoRW):
-
+
    # ref with about 100 commits in its history
-    ref_100 = '0.1.6'
+    ref_100 = 'v0.99'
    def _query_commit_info(self, c):
        c.author
@@ -45,13 +45,14 @@ class TestPerformance(TestBigRepoRW):
            # END for each object
        # END for each commit
        elapsed_time = time() - st
+        assert no, "Should have traversed a few objects"
        print >> sys.stderr, "Traversed %i Trees and a total of %i uncached objects in %s [s] ( %f objs/s )" % (nc, no, elapsed_time, no/elapsed_time)
    def test_commit_traversal(self):
        # bound to cat-file parsing performance
        nc = 0
        st = time()
-        for c in self.gitrorepo.commit(self.head_sha_2k).traverse(branch_first=False):
+        for c in self.rorepo.commit(self.head_sha_2k).traverse(branch_first=False):
            nc += 1
            self._query_commit_info(c)
        # END for each traversed commit
@@ -62,7 +63,7 @@ class TestPerformance(TestBigRepoRW):
        # bound to stream parsing performance
        nc = 0
        st = time()
-        for c in Commit.iter_items(self.gitrorepo, self.head_sha_2k):
+        for c in Commit.iter_items(self.rorepo, self.head_sha_2k):
            nc += 1
            self._query_commit_info(c)
        # END for each traversed commit
@@ -70,10 +71,10 @@ class TestPerformance(TestBigRepoRW):
print >> sys.stderr, "Iterated %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc/elapsed_time)
    def test_commit_serialization(self):
-        assert_commit_serialization(self.gitrwrepo, self.head_sha_2k, True)
+        assert_commit_serialization(self.rwrepo, self.head_sha_2k, True)
-        rwrepo = self.gitrwrepo
-        make_object = rwrepo.odb.store
+        rwrepo = self.rwrepo
+        make_object = rwrepo.store
        # direct serialization - deserialization can be tested afterwards
        # serialization is probably limited on IO
        hc = rwrepo.commit(self.head_sha_2k)
diff --git a/git/test/performance/test_odb.py b/git/test/performance/test_odb.py
deleted file mode 100644
index 32b70f69..00000000
--- a/git/test/performance/test_odb.py
+++ /dev/null
@@ -1,70 +0,0 @@
-"""Performance tests for object store"""
-
-from time import time
-import sys
-import stat
-
-from lib import (
- TestBigRepoR
- )
-
-
-class TestObjDBPerformance(TestBigRepoR):
-
- def test_random_access(self):
- results = [ ["Iterate Commits"], ["Iterate Blobs"], ["Retrieve Blob Data"] ]
- for repo in (self.gitrorepo, self.puregitrorepo):
- # GET COMMITS
- st = time()
- root_commit = repo.commit(self.head_sha_2k)
- commits = list(root_commit.traverse())
- nc = len(commits)
- elapsed = time() - st
-
- print >> sys.stderr, "%s: Retrieved %i commits from ObjectStore in %g s ( %f commits / s )" % (type(repo.odb), nc, elapsed, nc / elapsed)
- results[0].append(elapsed)
-
- # GET TREES
- # walk all trees of all commits
- st = time()
- blobs_per_commit = list()
- nt = 0
- for commit in commits:
- tree = commit.tree
- blobs = list()
- for item in tree.traverse():
- nt += 1
- if item.type == 'blob':
- blobs.append(item)
- # direct access for speed
- # END while trees are there for walking
- blobs_per_commit.append(blobs)
- # END for each commit
- elapsed = time() - st
-
- print >> sys.stderr, "%s: Retrieved %i objects from %i commits in %g s ( %f objects / s )" % (type(repo.odb), nt, len(commits), elapsed, nt / elapsed)
- results[1].append(elapsed)
-
- # GET BLOBS
- st = time()
- nb = 0
- too_many = 15000
- data_bytes = 0
- for blob_list in blobs_per_commit:
- for blob in blob_list:
- data_bytes += len(blob.data_stream.read())
- # END for each blobsha
- nb += len(blob_list)
- if nb > too_many:
- break
- # END for each bloblist
- elapsed = time() - st
-
- print >> sys.stderr, "%s: Retrieved %i blob (%i KiB) and their data in %g s ( %f blobs / s, %f KiB / s )" % (type(repo.odb), nb, data_bytes/1000, elapsed, nb / elapsed, (data_bytes / 1000) / elapsed)
- results[2].append(elapsed)
- # END for each repo type
-
- # final results
- for test_name, a, b in results:
- print >> sys.stderr, "%s: %f s vs %f s, pure is %f times slower" % (test_name, a, b, b / a)
- # END for each result
diff --git a/git/test/performance/test_streams.py b/git/test/performance/test_streams.py
deleted file mode 100644
index 7f17d722..00000000
--- a/git/test/performance/test_streams.py
+++ /dev/null
@@ -1,131 +0,0 @@
-"""Performance data streaming performance"""
-
-from git.test.lib import *
-from gitdb import *
-from gitdb.util import bin_to_hex
-
-from time import time
-import os
-import sys
-import stat
-import subprocess
-
-from gitdb.test.lib import make_memory_file
-
-from lib import (
- TestBigRepoR
- )
-
-
-class TestObjDBPerformance(TestBigRepoR):
-
- large_data_size_bytes = 1000*1000*10 # some MiB should do it
- moderate_data_size_bytes = 1000*1000*1 # just 1 MiB
-
- @with_rw_repo('HEAD', bare=True)
- def test_large_data_streaming(self, rwrepo):
- # TODO: This part overlaps with the same file in gitdb.test.performance.test_stream
- # It should be shared if possible
- ldb = LooseObjectDB(os.path.join(rwrepo.git_dir, 'objects'))
-
- for randomize in range(2):
- desc = (randomize and 'random ') or ''
- print >> sys.stderr, "Creating %s data ..." % desc
- st = time()
- size, stream = make_memory_file(self.large_data_size_bytes, randomize)
- elapsed = time() - st
- print >> sys.stderr, "Done (in %f s)" % elapsed
-
- # writing - due to the compression it will seem faster than it is
- st = time()
- binsha = ldb.store(IStream('blob', size, stream)).binsha
- elapsed_add = time() - st
- assert ldb.has_object(binsha)
- db_file = ldb.readable_db_object_path(bin_to_hex(binsha))
- fsize_kib = os.path.getsize(db_file) / 1000
-
-
- size_kib = size / 1000
- print >> sys.stderr, "Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)" % (size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add)
-
- # reading all at once
- st = time()
- ostream = ldb.stream(binsha)
- shadata = ostream.read()
- elapsed_readall = time() - st
-
- stream.seek(0)
- assert shadata == stream.getvalue()
- print >> sys.stderr, "Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)" % (size_kib, desc, elapsed_readall, size_kib / elapsed_readall)
-
-
- # reading in chunks of 1 MiB
- cs = 512*1000
- chunks = list()
- st = time()
- ostream = ldb.stream(binsha)
- while True:
- data = ostream.read(cs)
- chunks.append(data)
- if len(data) < cs:
- break
- # END read in chunks
- elapsed_readchunks = time() - st
-
- stream.seek(0)
- assert ''.join(chunks) == stream.getvalue()
-
- cs_kib = cs / 1000
- print >> sys.stderr, "Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)" % (size_kib, desc, cs_kib, elapsed_readchunks, size_kib / elapsed_readchunks)
-
- # del db file so git has something to do
- os.remove(db_file)
-
- # VS. CGIT
- ##########
- # CGIT ! Can using the cgit programs be faster ?
- proc = rwrepo.git.hash_object('-w', '--stdin', as_process=True, istream=subprocess.PIPE)
-
- # write file - pump everything in at once to be a fast as possible
- data = stream.getvalue() # cache it
- st = time()
- proc.stdin.write(data)
- proc.stdin.close()
- gitsha = proc.stdout.read().strip()
- proc.wait()
- gelapsed_add = time() - st
- del(data)
- assert gitsha == bin_to_hex(binsha) # we do it the same way, right ?
-
- # as its the same sha, we reuse our path
- fsize_kib = os.path.getsize(db_file) / 1000
- print >> sys.stderr, "Added %i KiB (filesize = %i KiB) of %s data to using git-hash-object in %f s ( %f Write KiB / s)" % (size_kib, fsize_kib, desc, gelapsed_add, size_kib / gelapsed_add)
-
- # compare ...
- print >> sys.stderr, "Git-Python is %f %% faster than git when adding big %s files" % (100.0 - (elapsed_add / gelapsed_add) * 100, desc)
-
-
- # read all
- st = time()
- s, t, size, data = rwrepo.git.get_object_data(gitsha)
- gelapsed_readall = time() - st
- print >> sys.stderr, "Read %i KiB of %s data at once using git-cat-file in %f s ( %f Read KiB / s)" % (size_kib, desc, gelapsed_readall, size_kib / gelapsed_readall)
-
- # compare
- print >> sys.stderr, "Git-Python is %f %% faster than git when reading big %sfiles" % (100.0 - (elapsed_readall / gelapsed_readall) * 100, desc)
-
-
- # read chunks
- st = time()
- s, t, size, stream = rwrepo.git.stream_object_data(gitsha)
- while True:
- data = stream.read(cs)
- if len(data) < cs:
- break
- # END read stream
- gelapsed_readchunks = time() - st
- print >> sys.stderr, "Read %i KiB of %s data in %i KiB chunks from git-cat-file in %f s ( %f Read KiB / s)" % (size_kib, desc, cs_kib, gelapsed_readchunks, size_kib / gelapsed_readchunks)
-
- # compare
- print >> sys.stderr, "Git-Python is %f %% faster than git when reading big %s files in chunks" % (100.0 - (elapsed_readchunks / gelapsed_readchunks) * 100, desc)
- # END for each randomization factor