[FIX] report: always close pdf files after merge

If an exception occurs during the merge (such as running out of file
descriptors), we would otherwise depend on the next garbage collection
pass to close the files. But that next GC pass may never come.

For example, if we run out of OS file descriptors during a merge, every
subsequent request crashes for the same reason, and the process never
recovers because the GC never gets a chance to run.
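
As an illustration only (not part of this commit), on a Unix system this
failure mode can be reproduced by artificially lowering the soft
file-descriptor limit with the standard resource module before triggering a
merge of many documents:

    # Illustration only: shrink the soft RLIMIT_NOFILE so that opening the
    # source PDFs runs out of descriptors partway through the merge loop.
    import resource

    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (32, hard))  # artificially small soft limit

    # A merge of enough documents now fails with "Too many open files"
    # (EMFILE) mid-loop, leaving the already-opened streams behind unless
    # they are closed explicitly.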

Much easier to explicitly close the files all the time.
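
The diff below shows the actual change; as a condensed sketch of the pattern
(simplified, with an illustrative helper name rather than the real _merge_pdf
method):

    def close_streams_after_merge(documents):
        # Sketch only; 'close_streams_after_merge' is an illustrative name,
        # not a function from the Odoo codebase. Collect every opened stream
        # and close it in a finally block, so the descriptors are released
        # even when the merge raises partway through.
        streams = []
        try:
            for document in documents:
                stream = open(document, 'rb')  # may raise EMFILE under fd pressure
                streams.append(stream)
                # ... add the document's pages to the PdfFileWriter here ...
        finally:
            for stream in streams:
                try:
                    stream.close()  # never let one failing close skip the rest
                except Exception:
                    pass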
Olivier Dony 2017-05-10 18:58:01 +02:00
parent 3978713996
commit e0014387c5
1 changed file with 16 additions and 12 deletions


@@ -560,19 +560,23 @@ class Report(osv.Model):
         """
         writer = PdfFileWriter()
         streams = []  # We have to close the streams *after* PdfFilWriter's call to write()
-        for document in documents:
-            pdfreport = file(document, 'rb')
-            streams.append(pdfreport)
-            reader = PdfFileReader(pdfreport)
-            for page in range(0, reader.getNumPages()):
-                writer.addPage(reader.getPage(page))
+        try:
+            for document in documents:
+                pdfreport = file(document, 'rb')
+                streams.append(pdfreport)
+                reader = PdfFileReader(pdfreport)
+                for page in range(0, reader.getNumPages()):
+                    writer.addPage(reader.getPage(page))
-        merged_file_fd, merged_file_path = tempfile.mkstemp(suffix='.html', prefix='report.merged.tmp.')
-        with closing(os.fdopen(merged_file_fd, 'w')) as merged_file:
-            writer.write(merged_file)
-        for stream in streams:
-            stream.close()
+            merged_file_fd, merged_file_path = tempfile.mkstemp(suffix='.pdf', prefix='report.merged.tmp.')
+            with closing(os.fdopen(merged_file_fd, 'w')) as merged_file:
+                writer.write(merged_file)
+        finally:
+            for stream in streams:
+                try:
+                    stream.close()
+                except Exception:
+                    pass
         return merged_file_path