Diffstat (limited to 'git/test/performance')
-rw-r--r--  git/test/performance/test_commit.py  |  8
-rw-r--r--  git/test/performance/test_odb.py     |  4
-rw-r--r--  git/test/performance/test_streams.py |  6
-rw-r--r--  git/test/performance/test_utils.py   | 16
4 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/git/test/performance/test_commit.py b/git/test/performance/test_commit.py
index 79555844..adee2567 100644
--- a/git/test/performance/test_commit.py
+++ b/git/test/performance/test_commit.py
@@ -46,7 +46,7 @@ class TestPerformance(TestBigRepoRW):
# END for each object
# END for each commit
elapsed_time = time() - st
- print >> sys.stderr, "Traversed %i Trees and a total of %i unchached objects in %s [s] ( %f objs/s )" % (nc, no, elapsed_time, no/elapsed_time)
+ print >> sys.stderr, "Traversed %i Trees and a total of %i unchached objects in %s [s] ( %f objs/s )" % (nc, no, elapsed_time, no / elapsed_time)
def test_commit_traversal(self):
# bound to cat-file parsing performance
@@ -57,7 +57,7 @@ class TestPerformance(TestBigRepoRW):
self._query_commit_info(c)
# END for each traversed commit
elapsed_time = time() - st
- print >> sys.stderr, "Traversed %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc/elapsed_time)
+ print >> sys.stderr, "Traversed %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc / elapsed_time)
def test_commit_iteration(self):
# bound to stream parsing performance
@@ -68,7 +68,7 @@ class TestPerformance(TestBigRepoRW):
self._query_commit_info(c)
# END for each traversed commit
elapsed_time = time() - st
- print >> sys.stderr, "Iterated %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc/elapsed_time)
+ print >> sys.stderr, "Iterated %i Commits in %s [s] ( %f commits/s )" % (nc, elapsed_time, nc / elapsed_time)
def test_commit_serialization(self):
assert_commit_serialization(self.gitrwrepo, self.head_sha_2k, True)
@@ -83,7 +83,7 @@ class TestPerformance(TestBigRepoRW):
nc = 5000
st = time()
for i in xrange(nc):
- cm = Commit( rwrepo, Commit.NULL_BIN_SHA, hc.tree,
+ cm = Commit(rwrepo, Commit.NULL_BIN_SHA, hc.tree,
hc.author, hc.authored_date, hc.author_tz_offset,
hc.committer, hc.committed_date, hc.committer_tz_offset,
str(i), parents=hc.parents, encoding=hc.encoding)
diff --git a/git/test/performance/test_odb.py b/git/test/performance/test_odb.py
index 57a953ab..5ddbbd53 100644
--- a/git/test/performance/test_odb.py
+++ b/git/test/performance/test_odb.py
@@ -12,7 +12,7 @@ from lib import (
class TestObjDBPerformance(TestBigRepoR):
def test_random_access(self):
- results = [ ["Iterate Commits"], ["Iterate Blobs"], ["Retrieve Blob Data"] ]
+ results = [["Iterate Commits"], ["Iterate Blobs"], ["Retrieve Blob Data"]]
for repo in (self.gitrorepo, self.puregitrorepo):
# GET COMMITS
st = time()
@@ -60,7 +60,7 @@ class TestObjDBPerformance(TestBigRepoR):
# END for each bloblist
elapsed = time() - st
- print >> sys.stderr, "%s: Retrieved %i blob (%i KiB) and their data in %g s ( %f blobs / s, %f KiB / s )" % (type(repo.odb), nb, data_bytes/1000, elapsed, nb / elapsed, (data_bytes / 1000) / elapsed)
+ print >> sys.stderr, "%s: Retrieved %i blob (%i KiB) and their data in %g s ( %f blobs / s, %f KiB / s )" % (type(repo.odb), nb, data_bytes / 1000, elapsed, nb / elapsed, (data_bytes / 1000) / elapsed)
results[2].append(elapsed)
# END for each repo type
diff --git a/git/test/performance/test_streams.py b/git/test/performance/test_streams.py
index c8b59da6..32ae98bf 100644
--- a/git/test/performance/test_streams.py
+++ b/git/test/performance/test_streams.py
@@ -19,8 +19,8 @@ from lib import (
class TestObjDBPerformance(TestBigRepoR):
- large_data_size_bytes = 1000*1000*10 # some MiB should do it
- moderate_data_size_bytes = 1000*1000*1 # just 1 MiB
+ large_data_size_bytes = 1000 * 1000 * 10 # some MiB should do it
+ moderate_data_size_bytes = 1000 * 1000 * 1 # just 1 MiB
@with_rw_repo('HEAD', bare=True)
def test_large_data_streaming(self, rwrepo):
@@ -58,7 +58,7 @@ class TestObjDBPerformance(TestBigRepoR):
print >> sys.stderr, "Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)" % (size_kib, desc, elapsed_readall, size_kib / elapsed_readall)
# reading in chunks of 1 MiB
- cs = 512*1000
+ cs = 512 * 1000
chunks = list()
st = time()
ostream = ldb.stream(binsha)
diff --git a/git/test/performance/test_utils.py b/git/test/performance/test_utils.py
index 4979eaa1..19c37a5f 100644
--- a/git/test/performance/test_utils.py
+++ b/git/test/performance/test_utils.py
@@ -65,7 +65,7 @@ class TestUtilPerformance(TestBigRepoR):
def test_instantiation(self):
ni = 100000
max_num_items = 4
- for mni in range(max_num_items+1):
+ for mni in range(max_num_items + 1):
for cls in (tuple, list):
st = time()
for i in xrange(ni):
@@ -74,11 +74,11 @@ class TestUtilPerformance(TestBigRepoR):
elif mni == 1:
cls((1,))
elif mni == 2:
- cls((1,2))
+ cls((1, 2))
elif mni == 3:
- cls((1,2,3))
+ cls((1, 2, 3))
elif mni == 4:
- cls((1,2,3,4))
+ cls((1, 2, 3, 4))
else:
cls(x for x in xrange(mni))
# END handle empty cls
@@ -91,22 +91,22 @@ class TestUtilPerformance(TestBigRepoR):
# tuple and tuple direct
st = time()
for i in xrange(ni):
- t = (1,2,3,4)
+ t = (1, 2, 3, 4)
# END for each item
elapsed = time() - st
print >> sys.stderr, "Created %i tuples (1,2,3,4) in %f s ( %f tuples / s)" % (ni, elapsed, ni / elapsed)
st = time()
for i in xrange(ni):
- t = tuple((1,2,3,4))
+ t = tuple((1, 2, 3, 4))
# END for each item
elapsed = time() - st
print >> sys.stderr, "Created %i tuples tuple((1,2,3,4)) in %f s ( %f tuples / s)" % (ni, elapsed, ni / elapsed)
def test_unpacking_vs_indexing(self):
ni = 1000000
- list_items = [1,2,3,4]
- tuple_items = (1,2,3,4)
+ list_items = [1, 2, 3, 4]
+ tuple_items = (1, 2, 3, 4)
for sequence in (list_items, tuple_items):
st = time()