home *** CD-ROM | disk | FTP | other *** search
- """
-
- A better system for getting resources from PakFiles is needed.
-
- At present, we're simply doing pakfiles[1].read_by_name etc., which requires
- the magic number 1 for the right file to check for model diffuse maps. This
- fails for models from the expansions which will have their textures in 20 or
- 30 something.
-
- Although not fastest, it's probably easiest to just search until we find what
- is required, aided by a mapping of which types are in each file. Here is that
- mapping:
-
- PAK_MAP = {
-     'bob': (5, 22, 32),
- 'bor': (4, 22, 32),
- 'bsi': (8, 22, 32),
- 'dds': (1, 22, 25, 32, 35),
- 'des': (6, 22, 25, 32, 35),
- 'lua': (10, 21, 34),
- 'mp3': (3, 10, 23, 26, 30, 33, 36),
- 'msb': (8, 22, 25, 32, 35),
- 'msh': (9, 22, 25, 32, 35),
- 'sem': (27, ),
- 'tga': (0, 22, 25, 32, 35),
- 'txt': (0, 22, 23, 26, 32, 33, 35, 36),
- 'wav': (2, 20, 23, 30, 33)
- }
-
- It is probably worth making an enum to name each of the files, for the times
- we have to reference one in particular, for example when we grab camera.des
- from pakfiles[6] for posing models. A file list for the original TOoD would
- be useful to confirm the naming. It looks like 21 should be the last pak in
- the original game.
-
- Example:
-
- def fetch_resource(pakfiles, name):
-     # splitext keeps the leading dot, so strip it to match PAK_MAP's keys
-     extension = os.path.splitext(name)[1][1:]
-     for num in PAK_MAP[extension]:
-         data = pakfiles[num].read_by_name(name)
-         if data is not None:
-             return data
-     return None
-
- """
-
- import io
- import os
- import struct
-
- import utils
-
-
class PakFileHeader:
    """Parsed 92-byte header of a Massive pak file.

    Reads RAW_SIZE bytes from the supplied file object and decodes them
    into the fields below; the file position is left just past the header.
    """

    RAW_SIZE = 92
    # NOTE(review): ver_string is unpacked as bytes, while this constant is
    # a str that is never compared against it — confirm intended use.
    VERSION_STRING = 'MASSIVE PAKFILE V 4.0\r\n\0'

    def __init__(self, f):
        """Decode the header from binary file object *f*."""
        ins = io.BytesIO(f.read(PakFileHeader.RAW_SIZE))
        # 32-bit version number followed by the 24-byte version string.
        self.ver, self.ver_string = struct.unpack('<L24s', ins.read(28))
        # Twelve unknown 32-bit values; observations so far:
        #   [0]..[4]: 0, 1245104==12ffb0, 4222728==406f08, 4243768==40c138, -1
        #   [5]: 322840 3229b0 322a40 322910 3228e0 322900 3228f0
        #   [6]: 4212818==404852
        #   [7]: 22, 26, 31, 35; == u9
        #   [8]: 4031da
        #   [a]: -1
        #   [b]: looks like crc32
        self.uvals = struct.unpack('<12L', ins.read(48))
        # Table/layout fields used by the rest of the reader.
        (self.num_files, self.root_index,
         self.data_start, self.file_size) = struct.unpack('<4L', ins.read(16))
-
-
class FileEntry:
    """One 16-byte record from the pak's file-entry table."""

    RAW_SIZE = 16

    def __init__(self, ins):
        """Decode one entry from stream *ins* (advances it by RAW_SIZE)."""
        fields = struct.unpack('<4L', ins.read(FileEntry.RAW_SIZE))
        self.size, self.pos, packed_a, packed_b = fields
        # Low 24 bits of each packed word are offsets into the name block.
        self.filename_offset = packed_a & 0xffffff
        self.dirname_offset = packed_b & 0xffffff
        # In sf0.pak, there were 2077 entries but only 1932 distinct values;
        # min was 0 max was 2077; is this a sorted order? Were there missing
        # numbers, or shared numbers?
        # NOTE(review): (packed_a >> 16) overlaps the 24-bit filename offset;
        # if only the top byte was meant this would be >> 24 — confirm
        # against the on-disk format before changing.
        self.node_num = (packed_a >> 16) | (packed_b >> 24)
-
-
def load_file_entries(f, num_files):
    """Read *num_files* FileEntry records from *f*.

    The whole table is read in one block so the underlying file sees a
    single read call, then decoded entry by entry from memory.
    """
    table = io.BytesIO(f.read(num_files * FileEntry.RAW_SIZE))
    entries = []
    for _ in range(num_files):
        entries.append(FileEntry(table))
    return entries
-
-
def process_raw_names(block, file_entries):
    """Decode the raw name block into a {offset: name} mapping.

    Names are stored reversed in the pak, hence the [::-1]. Directory names
    start at dirname_offset; filenames are preceded by an unknown int16
    (hash? — TODO confirm) which is skipped, but the mapping is still keyed
    by the unadjusted filename_offset.
    """
    ins = io.BytesIO(block)
    names = {}
    for entry in file_entries:
        dir_off = entry.dirname_offset
        if dir_off not in names:
            ins.seek(dir_off)
            names[dir_off] = utils.read_cstr(ins)[::-1]
        file_off = entry.filename_offset
        ins.seek(file_off + 2)  # skip the leading int16
        names[file_off] = utils.read_cstr(ins)[::-1]
    return names
-
-
class PakFile:
    """A single Massive .pak archive.

    On construction the header, entry table and name block are parsed and
    kept in memory; file data is read on demand from the open handle.

    Fix: the open file handle was never closed (resource leak) — close()
    and context-manager support are added; read_by_name no longer does an
    O(n) identity search via entries.index() just to re-read an entry it
    already holds.
    """

    def __init__(self, filename):
        self.f = open(filename, 'rb')
        self.header = PakFileHeader(self.f)
        self.entries = load_file_entries(self.f, self.header.num_files)
        # Everything between the entry table and data_start is the name block.
        name_block_size = self.header.data_start - self.f.tell()
        name_block = self.f.read(name_block_size)
        self.names = process_raw_names(name_block, self.entries)

    def close(self):
        """Close the underlying file; subsequent reads will fail."""
        self.f.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
        return False

    def find_file(self, sought_filename):
        """Return the entry for 'dir/name', or None if it is not present."""
        dirname, filename = os.path.split(sought_filename)
        for entry in self.entries:
            if self.names[entry.dirname_offset] != dirname:
                continue
            if self.names[entry.filename_offset] == filename:
                return entry
        return None

    def dirname_of_entry(self, entry):
        return self.names[entry.dirname_offset]

    def dirname_of_offset(self, offset):
        return self.names[offset]

    def filename_of_entry(self, entry):
        return self.names[entry.filename_offset]

    def filename_of_index(self, index):
        return self.names[self.entries[index].filename_offset]

    def _read_entry(self, entry):
        # entry.pos is relative to the start of the data area.
        self.f.seek(self.header.data_start + entry.pos)
        return self.f.read(entry.size)

    def read_by_index(self, index):
        """Return the raw data of the entry at *index*."""
        return self._read_entry(self.entries[index])

    def read_by_name(self, name):
        """Return the raw data for 'dir/name', or None if not found."""
        entry = self.find_file(name)
        if entry is None:
            return None
        # Read directly: the old entries.index(entry) round-trip was an
        # O(n) identity search that recovered the entry we already have.
        return self._read_entry(entry)

    def sorted_dir_offsets(self):
        """Distinct directory-name offsets, sorted by directory name."""
        offsets = set(entry.dirname_offset for entry in self.entries)
        return sorted(offsets, key=lambda offset: self.names[offset])

    def sorted_file_indices_of_dir_offset(self, offset):
        """Entry indices within one directory, sorted by filename."""
        indices = [i for i in range(len(self.entries))
                   if self.entries[i].dirname_offset == offset]
        return sorted(indices, key=self.filename_of_index)

    def get_name_and_size(self, index):
        return self.filename_of_index(index), self.entries[index].size
-
-
class PakFileCollection:
    """Every sf*.pak archive of an installation, indexed by pak number.

    *path* is treated as a raw prefix (e.g. 'C:/game/'), so it must end in
    a path separator; 'sf{num}.pak' is appended verbatim.
    """

    # Pak numbers that exist on disk; the gaps (7, 11-19, 24, ...) do not.
    NUMS = (
        0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 20, 21,
        22, 23, 25, 26, 27, 30, 32, 33, 34, 35, 36
    )

    def __init__(self, path):
        """Open all known pak files found under the *path* prefix."""
        self.files = {}
        for num in PakFileCollection.NUMS:
            self.files[num] = PakFile(path + 'sf{}.pak'.format(num))

    def __getitem__(self, key):
        """Look up an open PakFile by its pak number."""
        return self.files[key]
-