Commit dcb0f6a8 authored by Sushil Khanchi

diff: use BytesIO over bytes for performance reasons

parent 8de2ca488696
Pipeline #20124 passed with stages in 1 minute and 56 seconds
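The performance motivation: building the patch with `bytes +=` copies the whole accumulated buffer on every append, so assembling a patch from n hunks is O(n²) in the worst case, whereas `BytesIO.write` appends into a single growable buffer. A minimal micro-benchmark sketch (not part of the commit; chunk size and iteration count are arbitrary):

```python
# Illustrative micro-benchmark, not from this repository: compare
# repeated bytes concatenation with BytesIO accumulation.
from io import BytesIO
from timeit import timeit

CHUNK = b'x' * 1024   # stand-in for one diff hunk
N = 2_000             # number of hunks

def concat_bytes():
    patch = b''
    for _ in range(N):
        patch += CHUNK            # copies the whole buffer each time
    return patch

def concat_bytesio():
    patch = BytesIO()
    for _ in range(N):
        patch.write(CHUNK)        # amortized O(1) append
    return patch.getvalue()

if __name__ == '__main__':
    print('bytes +=:', timeit(concat_bytes, number=3))
    print('BytesIO :', timeit(concat_bytesio, number=3))
```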
@@ -131,7 +131,7 @@
     bytes_count = attr.ib(0)
     collapsed = attr.ib(False)
     line_count = attr.ib(0)
-    patch = attr.ib(b'')
+    patch = attr.ib(BytesIO())
     too_large = attr.ib(False)
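One subtlety with this new default: `attr.ib(BytesIO())` evaluates `BytesIO()` once, when the class body is executed, so every instance that keeps the default shares the same buffer object. That is presumably why `parse()` below rebinds a fresh `BytesIO` per file. A sketch of the pitfall; the `Demo` classes are hypothetical, not from this codebase:

```python
import attr
from io import BytesIO

@attr.s
class Demo:
    patch = attr.ib(BytesIO())        # evaluated once, shared by all instances

a, b = Demo(), Demo()
a.patch.write(b'hunk')
assert b.patch.getvalue() == b'hunk'  # b observes a's writes

# attrs' per-instance alternative is a factory default:
@attr.s
class DemoFixed:
    patch = attr.ib(factory=BytesIO)  # fresh BytesIO for each instance

c, d = DemoFixed(), DemoFixed()
c.patch.write(b'hunk')
assert d.patch.getvalue() == b''      # d has its own empty buffer
```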
@@ -186,21 +186,24 @@
         )
 
     def parse(self, iter_file_hunks):
+        # For explicitness, let's instantiate a new BytesIO obj for each file
+        self.curr_diff.patch = BytesIO()
         for diffhunk in iter_file_hunks:
             if diffhunk.startswith(b'@@'):
                 newdiffhunk = diffhunk.split(b'\n', 1)[-1]
                 lines = newdiffhunk.splitlines(True)
-                self.curr_diff.patch += diffhunk
+                self.curr_diff.patch.write(diffhunk)
                 self.curr_diff.bytes_count += len(newdiffhunk)
                 self.curr_diff.line_count += len(lines)
                 if self.limits.collapse_diffs:
                     if self.is_over_safe_limits():
-                        self.curr_diff.patch = b''
+                        self.curr_diff.patch = BytesIO()
                         self.curr_diff.collapsed = True
                 if self.limits.enforce_limits:
                     # Apply single-file size limit
                     # Note: here we are not comparing with curr_diff.bytes_count
-                    if len(self.curr_diff.patch) >= self.limits.max_patch_bytes:
-                        self.curr_diff.patch = b''
+                    curr_patch = self.curr_diff.patch.getvalue()
+                    if len(curr_patch) >= self.limits.max_patch_bytes:
+                        self.curr_diff.patch = BytesIO()
                         self.curr_diff.too_large = True
                 if self.is_over_limits():
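In both the collapse and the too-large branches the accumulated patch is discarded by rebinding `patch` to a new empty `BytesIO()`, leaving the old buffer for the garbage collector. An in-place `seek(0)` plus `truncate()` would have the same observable effect; a sketch under that assumption (the `Diff` holder is hypothetical):

```python
from io import BytesIO

class Diff:
    # Hypothetical stand-in for curr_diff, not from this codebase.
    def __init__(self):
        self.patch = BytesIO()

def reset_by_rebinding(diff):
    diff.patch = BytesIO()    # what the commit does

def reset_in_place(diff):
    diff.patch.seek(0)        # rewind to the start...
    diff.patch.truncate()     # ...and drop the contents

d = Diff()
d.patch.write(b'some hunk data')
reset_in_place(d)
assert d.patch.getvalue() == b''
```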
@@ -302,7 +305,7 @@
                 too_large=curr_diff.too_large,
                 collapsed=curr_diff.collapsed,
             )
-            patch = curr_diff.patch
+            patch = curr_diff.patch.getvalue()
             patch_itr = split_batches(patch, DIFF_MSG_SIZE_THRESHOLD)
             for p, eop in iter_boolean_lookahead(patch_itr):
                 response.raw_patch_data = p
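`split_batches` and `iter_boolean_lookahead` are project-internal helpers; judging only from this call site, the first slices the assembled patch into chunks of at most `DIFF_MSG_SIZE_THRESHOLD` bytes and the second pairs each chunk with an end-of-stream flag. Hypothetical stand-ins, assuming exactly that behavior:

```python
from io import BytesIO

def split_batches(data, threshold):
    # Hypothetical stand-in: yield consecutive slices of at most
    # `threshold` bytes.
    for start in range(0, len(data), threshold):
        yield data[start:start + threshold]

def iter_boolean_lookahead(iterable):
    # Hypothetical stand-in: yield (item, is_last) pairs.
    it = iter(iterable)
    try:
        prev = next(it)
    except StopIteration:
        return
    for item in it:
        yield prev, False
        prev = item
    yield prev, True

patch = BytesIO()
patch.write(b'x' * 10)
for chunk, end_of_patch in iter_boolean_lookahead(split_batches(patch.getvalue(), 4)):
    print(len(chunk), end_of_patch)   # 4 False, 4 False, 2 True
```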