# Source Generated with Decompyle++
# File: in.pyc (Python 2.6)

from __future__ import with_statement

__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal kovid@kovidgoyal.net and Marshall T. Vandegrift <llasram@gmail.com>'
__docformat__ = 'restructuredtext en'

from struct import pack, unpack
from cStringIO import StringIO

from calibre.ebooks.mobi import MobiError
from calibre.ebooks.mobi.writer import rescale_image, MAX_THUMB_DIMEN
from calibre.ebooks.mobi.langcodes import iana2mobi
from calibre.utils.date import now as nowf


class StreamSlicer(object):
    '''Expose a window of a seekable stream as a sliceable, writable sequence.'''

    def __init__(self, stream, start=0, stop=None):
        self._stream = stream
        self.start = start
        if stop is None:
            stream.seek(0, 2)
            stop = stream.tell()
        self.stop = stop
        self._len = stop - start

    def __len__(self):
        return self._len

    def __getitem__(self, key):
        stream = self._stream
        base = self.start
        if isinstance(key, (int, long)):
            stream.seek(base + key)
            return stream.read(1)
        if isinstance(key, slice):
            start, stop, stride = key.indices(self._len)
            if stride < 0:
                start, stop = stop, start
            size = stop - start
            if size <= 0:
                return ''
            stream.seek(base + start)
            data = stream.read(size)
            if stride != 1:
                data = data[::stride]
            return data
        raise TypeError('stream indices must be integers')

    def __setitem__(self, key, value):
        stream = self._stream
        base = self.start
        if isinstance(key, (int, long)):
            if len(value) != 1:
                raise ValueError('key and value lengths must match')
            stream.seek(base + key)
            return stream.write(value)
        if isinstance(key, slice):
            start, stop, stride = key.indices(self._len)
            if stride < 0:
                start, stop = stop, start
            size = stop - start
            if stride != 1:
                value = value[::stride]
            if len(value) != size:
                raise ValueError('key and value lengths must match')
            stream.seek(base + start)
            return stream.write(value)
        raise TypeError('stream indices must be integers')

    def update(self, data_blocks):
        # Rewrite the stream from self.start onwards with the supplied blocks
        stream = self._stream
        base = self.start
        stream.seek(base)
        self._stream.truncate(base)
        for block in data_blocks:
            stream.write(block)

    def truncate(self, value):
        self._stream.truncate(value)
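
# Illustrative sketch, not part of the original module: StreamSlicer lets the
# updater below read and patch fixed-offset header fields through ordinary
# slice syntax. The helper name and the sample bytes are made up for the demo;
# the function is never called at import time.
def _streamslicer_demo():
    buf = StringIO()
    buf.write('BOOKMOBI' + '\x00' * 8)
    window = StreamSlicer(buf)           # window over the whole stream
    ident = window[0:8]                  # read 8 bytes at offset 0 -> 'BOOKMOBI'
    window[8:12] = pack('>I', 42)        # patch 4 bytes in place at offset 8
    return ident, unpack('>I', window[8:12])[0]
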
class MetadataUpdater(object):
    DRM_KEY_SIZE = 48

    def __init__(self, stream):
        self.stream = stream
        data = self.data = StreamSlicer(stream)
        self.type = data[60:68]

        if self.type != 'BOOKMOBI':
            return

        self.nrecs, = unpack('>H', data[76:78])
        record0 = self.record0 = self.record(0)
        mobi_header_length, = unpack('>I', record0[20:24])
        if not mobi_header_length:
            raise MobiError("Non-standard file format. "
                            "Try 'Convert E-Books' with MOBI as Input and Output formats.")

        self.encryption_type, = unpack('>H', record0[12:14])
        codepage, = unpack('>I', record0[28:32])
        self.codec = 'utf-8' if codepage == 65001 else 'cp1252'

        image_base, = unpack('>I', record0[108:112])
        # MOBI header flags; bit 0x40 signals that an EXTH block follows the
        # header. self.flags is reused by create_exth() below.
        self.flags, = unpack('>I', record0[128:132])
        self.have_exth = bool(self.flags & 0x40)
        self.cover_record = None
        self.thumbnail_record = None
        self.timestamp = None
        self.pdbrecords = self.get_pdbrecords()

        self.drm_block = None
        if self.encryption_type != 0:
            if self.have_exth:
                self.drm_block = self.fetchDRMdata()
            else:
                raise MobiError('Unable to set metadata on DRM file without EXTH header')

        self.original_exth_records = {}
        self.fetchEXTHFields()

    def fetchDRMdata(self):
        # Fetch the DRM key block from record0
        drm_offset = int(unpack('>I', self.record0[168:172])[0])
        self.drm_key_count = int(unpack('>I', self.record0[172:176])[0])
        drm_keys = ''
        for x in range(self.drm_key_count):
            base_addr = drm_offset + x * self.DRM_KEY_SIZE
            drm_keys += self.record0[base_addr:base_addr + self.DRM_KEY_SIZE]
        return drm_keys

    def fetchEXTHFields(self):
        stream = self.stream
        record0 = self.record0

        # 20:24 = mobi_header_length, 16 = PDB header size
        exth_off = unpack('>I', record0[20:24])[0] + 16 + record0.start
        image_base, = unpack('>I', record0[108:112])

        # Fetch the EXTH block and store any fields not specifiable in the GUI
        exth = self.exth = StreamSlicer(stream, exth_off, record0.stop)
        nitems, = unpack('>I', exth[8:12])
        pos = 12
        for i in xrange(nitems):
            id, size = unpack('>II', exth[pos:pos + 8])
            content = exth[pos + 8:pos + size]
            pos += size

            self.original_exth_records[id] = content

            if id == 106:
                self.timestamp = content
            elif id == 201:
                rindex, = self.cover_rindex, = unpack('>i', content)
                if rindex > 0:
                    self.cover_record = self.record(rindex + image_base)
            elif id == 202:
                rindex, = self.thumbnail_rindex, = unpack('>i', content)
                if rindex > 0:
                    self.thumbnail_record = self.record(rindex + image_base)

    def patch(self, off, new_record0):
        # Save the current size of each record
        record_sizes = [len(new_record0)]
        for i in range(1, self.nrecs - 1):
            record_sizes.append(self.pdbrecords[i + 1][0] - self.pdbrecords[i][0])
        # The last record runs to the end of the data
        record_sizes.append(self.data.stop - self.pdbrecords[self.nrecs - 1][0])

        # Rebuild the PDB offsets around the resized record0
        updated_pdbrecords = [self.pdbrecords[0][0]]
        record0_offset = self.pdbrecords[0][0]
        updated_offset = record0_offset + len(new_record0)
        for i in range(1, self.nrecs - 1):
            updated_pdbrecords.append(updated_offset)
            updated_offset += record_sizes[i]
        updated_pdbrecords.append(updated_offset)

        # Write the new record0 followed by the remaining, unchanged records
        data_blocks = [new_record0]
        for i in range(1, self.nrecs):
            data_blocks.append(self.data[self.pdbrecords[i][0]:self.pdbrecords[i][0] + record_sizes[i]])

        self.record0.update(data_blocks)
        self.update_pdbrecords(updated_pdbrecords)

        # Shrink or grow the underlying data window as needed
        if updated_pdbrecords[-1] + record_sizes[-1] < self.data.stop:
            self.data.truncate(updated_pdbrecords[-1] + record_sizes[-1])
        else:
            self.data.stop = updated_pdbrecords[-1] + record_sizes[-1]

    def patchSection(self, section, new):
        off = self.pdbrecords[section][0]
        self.patch(off, new)

    def create_exth(self, new_title=None, exth=None):
        # Rebuild record0 with an updated EXTH block (and title, if given)
        if isinstance(new_title, unicode):
            new_title = new_title.encode(self.codec, 'replace')

        title_offset, = unpack('>L', self.record0[84:88])
        title_length, = unpack('>L', self.record0[88:92])
        title_in_file, = unpack('%ds' % title_length,
                                self.record0[title_offset:title_offset + title_length])

        # Adjust the header length to accommodate the PrimaryINDX field
        mobi_header_length, = unpack('>L', self.record0[20:24])
        if mobi_header_length == 228:
            self.record0[23] = '\xe8'  # patch mobi_header_length to 232
            self.record0[244:248] = pack('>L', 0xffffffff)
            mobi_header_length = 232

        # Set the EXTH flag (0x40)
        self.record0[128:132] = pack('>L', self.flags | 0x40)

        if not exth:
            # Construct an empty EXTH block
            pad = '\x00\x00\x00\x00'
            exth = ['EXTH', pack('>II', 12, 0), pad]
            exth = ''.join(exth)

        # Update drm_offset (168) and title_offset (84)
        if self.encryption_type != 0:
            self.record0[168:172] = pack('>L', 16 + mobi_header_length + len(exth))
            self.record0[176:180] = pack('>L', len(self.drm_block))
            self.record0[84:88] = pack('>L', 16 + mobi_header_length + len(exth) + len(self.drm_block))
        else:
            self.record0[84:88] = pack('>L', 16 + mobi_header_length + len(exth))

        if new_title:
            self.record0[88:92] = pack('>L', len(new_title))

        # Assemble: MOBI header + EXTH (+ DRM block) + title + padding
        new_record0 = StringIO()
        new_record0.write(self.record0[:16 + mobi_header_length])
        new_record0.write(exth)
        if self.encryption_type != 0:
            new_record0.write(self.drm_block)
        new_record0.write(new_title if new_title else title_in_file)

        # Pad to a multiple of 4 bytes
        trail = len(new_record0.getvalue()) % 4
        pad = '\x00' * (4 - trail)
        new_record0.write(pad)

        self.patchSection(0, new_record0.getvalue())
        self.record0 = self.record(0)
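
    # Illustrative sketch, not part of the original class: an EXTH block, as
    # consumed by fetchEXTHFields() and assembled by update()/create_exth(), is
    # 'EXTH' + a 4-byte total length + a 4-byte record count, followed by
    # records of (4-byte id, 4-byte size including this 8-byte header, payload)
    # and padding to a multiple of 4 bytes. The method name is hypothetical,
    # e.g. _build_exth_example([(100, 'Some Author'), (503, 'Updated Title')]).
    def _build_exth_example(self, records):
        body = StringIO()
        for rec_id, payload in records:
            body.write(pack('>II', rec_id, len(payload) + 8))
            body.write(payload)
        body = body.getvalue()
        pad = '\x00' * (4 - len(body) % 4)
        return ''.join(['EXTH', pack('>II', len(body) + 12, len(records)), body, pad])
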
    def hexdump(self, src, length=16):
        # Diagnostic: pretty-print src as offset / hex bytes / printable chars
        FILTER = ''.join([(len(repr(chr(x))) == 3) and chr(x) or '.' for x in range(256)])
        N = 0
        result = ''
        while src:
            s, src = src[:length], src[length:]
            hexa = ' '.join(['%02X' % ord(x) for x in s])
            s = s.translate(FILTER)
            result += '%04X %-*s %s\n' % (N, length * 3, hexa, s)
            N += length
        print result

    def get_pdbrecords(self):
        pdbrecords = []
        for i in xrange(self.nrecs):
            offset, a1, a2, a3, a4 = unpack('>LBBBB', self.data[78 + i * 8:78 + i * 8 + 8])
            flags, val = a1, a2 << 16 | a3 << 8 | a4
            pdbrecords.append([offset, flags, val])
        return pdbrecords

    def update_pdbrecords(self, updated_pdbrecords):
        for i, pdbrecord in enumerate(updated_pdbrecords):
            self.data[78 + i * 8:78 + i * 8 + 4] = pack('>L', pdbrecord)

        # Refresh the cached copy of the record list
        self.pdbrecords = self.get_pdbrecords()

    def dump_pdbrecords(self):
        # Diagnostic
        print 'MetadataUpdater.dump_pdbrecords()'
        print '%10s %10s %10s' % ('offset', 'flags', 'val')
        for i in xrange(len(self.pdbrecords)):
            pdbrecord = self.pdbrecords[i]
            print '%10X %10X %10X' % (pdbrecord[0], pdbrecord[1], pdbrecord[2])

    def record(self, n):
        if n >= self.nrecs:
            raise ValueError('non-existent record %r' % n)
        offoff = 78 + 8 * n
        start, = unpack('>I', self.data[offoff + 0:offoff + 4])
        stop = None
        if n < self.nrecs - 1:
            stop, = unpack('>I', self.data[offoff + 8:offoff + 12])
        return StreamSlicer(self.stream, start, stop)
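
    # Illustrative sketch, not part of the original class: the PalmDB record
    # list walked by get_pdbrecords()/record() starts at offset 78 and holds an
    # 8-byte entry per record (4-byte data offset, 1 flag byte, 3-byte unique
    # id); the record count lives at offset 76. The method name is hypothetical.
    def _list_record_offsets(self):
        nrecs, = unpack('>H', self.data[76:78])
        return [unpack('>L', self.data[78 + i * 8:78 + i * 8 + 4])[0]
                for i in xrange(nrecs)]
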
    def update(self, mi):
        def update_exth_record(rec):
            recs.append(rec)
            if rec[0] in self.original_exth_records:
                self.original_exth_records.pop(rec[0])

        if self.type != 'BOOKMOBI':
            raise MobiError("Setting metadata only supported for MOBI files of type 'BOOK'.\n"
                            "\tThis is a '%s' file of type '%s'" % (self.type[0:4], self.type[4:8]))

        recs = []
        try:
            from calibre.ebooks.conversion.config import load_defaults
            prefs = load_defaults('mobi_output')
            pas = prefs.get('prefer_author_sort', False)
            kindle_pdoc = prefs.get('personal_doc', None)
        except:
            pas = False
            kindle_pdoc = None
        if mi.author_sort and pas:
            authors = mi.author_sort
            update_exth_record((100, authors.encode(self.codec, 'replace')))
        elif mi.authors:
            authors = ';'.join(mi.authors)
            update_exth_record((100, authors.encode(self.codec, 'replace')))
        if mi.publisher:
            update_exth_record((101, mi.publisher.encode(self.codec, 'replace')))
        if mi.comments:
            # Strip user annotations before writing the comments field
            a_offset = mi.comments.find('<div class="user_annotations">')
            ad_offset = mi.comments.find('<hr class="annotations_divider" />')
            if a_offset >= 0:
                mi.comments = mi.comments[:a_offset]
            if ad_offset >= 0:
                mi.comments = mi.comments[:ad_offset]
            update_exth_record((103, mi.comments.encode(self.codec, 'replace')))
        if mi.isbn:
            update_exth_record((104, mi.isbn.encode(self.codec, 'replace')))
        if mi.tags:
            subjects = '; '.join(mi.tags)
            update_exth_record((105, subjects.encode(self.codec, 'replace')))
            if kindle_pdoc and kindle_pdoc in mi.tags:
                update_exth_record((501, str('PDOC')))
        if mi.pubdate:
            update_exth_record((106, str(mi.pubdate).encode(self.codec, 'replace')))
        elif mi.timestamp:
            update_exth_record((106, str(mi.timestamp).encode(self.codec, 'replace')))
        elif self.timestamp:
            update_exth_record((106, self.timestamp))
        else:
            update_exth_record((106, nowf().isoformat().encode(self.codec, 'replace')))
        if self.cover_record is not None:
            update_exth_record((201, pack('>I', self.cover_rindex)))
            update_exth_record((203, pack('>I', 0)))
        if self.thumbnail_record is not None:
            update_exth_record((202, pack('>I', self.thumbnail_rindex)))
        if 503 in self.original_exth_records:
            update_exth_record((503, mi.title.encode(self.codec, 'replace')))

        # Carry over any original EXTH fields that were not overwritten above
        for id in sorted(self.original_exth_records):
            recs.append((id, self.original_exth_records[id]))
        recs = sorted(recs, key=lambda x: (x[0], x[0]))

        exth = StringIO()
        for code, data in recs:
            exth.write(pack('>II', code, len(data) + 8))
            exth.write(data)
        exth = exth.getvalue()
        trail = len(exth) % 4
        pad = '\x00' * (4 - trail)  # always pad with at least one byte
        exth = ['EXTH', pack('>II', len(exth) + 12, len(recs)), exth, pad]
        exth = ''.join(exth)

        if getattr(self, 'exth', None) is None:
            raise MobiError('No existing EXTH record. Cannot update metadata.')

        self.record0[92:96] = iana2mobi(mi.language)
        self.create_exth(exth=exth, new_title=mi.title)

        # Fetch the updated timestamp, cover_record and thumbnail_record
        self.fetchEXTHFields()

        if mi.cover_data[1] or mi.cover:
            try:
                data = mi.cover_data[1] if mi.cover_data[1] else open(mi.cover, 'rb').read()
            except:
                pass
            else:
                if self.cover_record is not None:
                    size = len(self.cover_record)
                    cover = rescale_image(data, size)
                    cover += '\x00' * (size - len(cover))
                    self.cover_record[:] = cover
                if self.thumbnail_record is not None:
                    size = len(self.thumbnail_record)
                    thumbnail = rescale_image(data, size, dimen=MAX_THUMB_DIMEN)
                    thumbnail += '\x00' * (size - len(thumbnail))
                    self.thumbnail_record[:] = thumbnail


def set_metadata(stream, mi):
    mu = MetadataUpdater(stream)
    mu.update(mi)
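

# Illustrative usage sketch, not part of the original module: set_metadata()
# patches the MOBI file in place, so the stream must be opened for reading and
# writing. The file name is a placeholder, and the MetaInformation import is
# assumed from calibre's metadata package.
if __name__ == '__main__':
    from calibre.ebooks.metadata import MetaInformation
    mi = MetaInformation('Updated Title', ['Some Author'])
    mi.tags = ['Fiction']
    with open('book.mobi', 'r+b') as stream:
        set_metadata(stream, mi)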