jpayne@69: # Copyright 1999 by Jeffrey Chang. All rights reserved. jpayne@69: # Copyright 2009-2018 by Peter Cock. All rights reserved. jpayne@69: # jpayne@69: # This file is part of the Biopython distribution and governed by your jpayne@69: # choice of the "Biopython License Agreement" or the "BSD 3-Clause License". jpayne@69: # Please see the LICENSE file that should have been included as part of this jpayne@69: # package. jpayne@69: """Code for more fancy file handles. jpayne@69: jpayne@69: Bio.File defines private classes used in Bio.SeqIO and Bio.SearchIO for jpayne@69: indexing files. These are not intended for direct use. jpayne@69: """ jpayne@69: jpayne@69: import os jpayne@69: import contextlib jpayne@69: import itertools jpayne@69: import collections.abc jpayne@69: jpayne@69: from abc import ABC, abstractmethod jpayne@69: jpayne@69: try: jpayne@69: import sqlite3 jpayne@69: except ImportError: jpayne@69: # May be missing if Python was compiled from source without its dependencies jpayne@69: sqlite3 = None # type: ignore jpayne@69: jpayne@69: jpayne@69: @contextlib.contextmanager jpayne@69: def as_handle(handleish, mode="r", **kwargs): jpayne@69: r"""Context manager to ensure we are using a handle. jpayne@69: jpayne@69: Context manager for arguments that can be passed to SeqIO and AlignIO read, write, jpayne@69: and parse methods: either file objects or path-like objects (strings, pathlib.Path jpayne@69: instances, or more generally, anything that can be handled by the builtin 'open' jpayne@69: function). jpayne@69: jpayne@69: When given a path-like object, returns an open file handle to that path, with provided jpayne@69: mode, which will be closed when the manager exits. jpayne@69: jpayne@69: All other inputs are returned, and are *not* closed. jpayne@69: jpayne@69: Arguments: jpayne@69: - handleish - Either a file handle or path-like object (anything which can be jpayne@69: passed to the builtin 'open' function, such as str, bytes, jpayne@69: pathlib.Path, and os.DirEntry objects) jpayne@69: - mode - Mode to open handleish (used only if handleish is a string) jpayne@69: - kwargs - Further arguments to pass to open(...) jpayne@69: jpayne@69: Examples jpayne@69: -------- jpayne@69: >>> from Bio import File jpayne@69: >>> import os jpayne@69: >>> with File.as_handle('seqs.fasta', 'w') as fp: jpayne@69: ... fp.write('>test\nACGT') jpayne@69: ... jpayne@69: 10 jpayne@69: >>> fp.closed jpayne@69: True jpayne@69: jpayne@69: >>> handle = open('seqs.fasta', 'w') jpayne@69: >>> with File.as_handle(handle) as fp: jpayne@69: ... fp.write('>test\nACGT') jpayne@69: ... jpayne@69: 10 jpayne@69: >>> fp.closed jpayne@69: False jpayne@69: >>> fp.close() jpayne@69: >>> os.remove("seqs.fasta") # tidy up jpayne@69: jpayne@69: """ jpayne@69: try: jpayne@69: with open(handleish, mode, **kwargs) as fp: jpayne@69: yield fp jpayne@69: except TypeError: jpayne@69: yield handleish jpayne@69: jpayne@69: jpayne@69: def _open_for_random_access(filename): jpayne@69: """Open a file in binary mode, spot if it is BGZF format etc (PRIVATE). jpayne@69: jpayne@69: This functionality is used by the Bio.SeqIO and Bio.SearchIO index jpayne@69: and index_db functions. jpayne@69: jpayne@69: If the file is gzipped but not BGZF, a specific ValueError is raised. jpayne@69: """ jpayne@69: handle = open(filename, "rb") jpayne@69: magic = handle.read(2) jpayne@69: handle.seek(0) jpayne@69: jpayne@69: if magic == b"\x1f\x8b": jpayne@69: # This is a gzipped file, but is it BGZF? jpayne@69: from . import bgzf jpayne@69: jpayne@69: try: jpayne@69: # If it is BGZF, we support that jpayne@69: return bgzf.BgzfReader(mode="rb", fileobj=handle) jpayne@69: except ValueError as e: jpayne@69: assert "BGZF" in str(e) jpayne@69: # Not a BGZF file after all, jpayne@69: handle.close() jpayne@69: raise ValueError( jpayne@69: "Gzipped files are not suitable for indexing, " jpayne@69: "please use BGZF (blocked gzip format) instead." jpayne@69: ) from None jpayne@69: jpayne@69: return handle jpayne@69: jpayne@69: jpayne@69: # The rest of this file defines code used in Bio.SeqIO and Bio.SearchIO jpayne@69: # for indexing jpayne@69: jpayne@69: jpayne@69: class _IndexedSeqFileProxy(ABC): jpayne@69: """Abstract base class for file format specific random access (PRIVATE). jpayne@69: jpayne@69: This is subclasses in both Bio.SeqIO for indexing as SeqRecord jpayne@69: objects, and in Bio.SearchIO for indexing QueryResult objects. jpayne@69: jpayne@69: Subclasses for each file format should define '__iter__', 'get' jpayne@69: and optionally 'get_raw' methods. jpayne@69: """ jpayne@69: jpayne@69: @abstractmethod jpayne@69: def __iter__(self): jpayne@69: """Return (identifier, offset, length in bytes) tuples. jpayne@69: jpayne@69: The length can be zero where it is not implemented or not jpayne@69: possible for a particular file format. jpayne@69: """ jpayne@69: raise NotImplementedError jpayne@69: jpayne@69: @abstractmethod jpayne@69: def get(self, offset): jpayne@69: """Return parsed object for this entry.""" jpayne@69: # Most file formats with self contained records can be handled by jpayne@69: # parsing StringIO(self.get_raw(offset).decode()) jpayne@69: raise NotImplementedError jpayne@69: jpayne@69: def get_raw(self, offset): jpayne@69: """Return the raw record from the file as a bytes string (if implemented). jpayne@69: jpayne@69: If the key is not found, a KeyError exception is raised. jpayne@69: jpayne@69: This may not have been implemented for all file formats. jpayne@69: """ jpayne@69: # Should be done by each sub-class (if possible) jpayne@69: raise NotImplementedError("Not available for this file format.") jpayne@69: jpayne@69: jpayne@69: class _IndexedSeqFileDict(collections.abc.Mapping): jpayne@69: """Read only dictionary interface to a sequential record file. jpayne@69: jpayne@69: This code is used in both Bio.SeqIO for indexing as SeqRecord jpayne@69: objects, and in Bio.SearchIO for indexing QueryResult objects. jpayne@69: jpayne@69: Keeps the keys and associated file offsets in memory, reads the file jpayne@69: to access entries as objects parsing them on demand. This approach jpayne@69: is memory limited, but will work even with millions of records. jpayne@69: jpayne@69: Note duplicate keys are not allowed. If this happens, a ValueError jpayne@69: exception is raised. jpayne@69: jpayne@69: As used in Bio.SeqIO, by default the SeqRecord's id string is used jpayne@69: as the dictionary key. In Bio.SearchIO, the query's id string is jpayne@69: used. This can be changed by supplying an optional key_function, jpayne@69: a callback function which will be given the record id and must jpayne@69: return the desired key. For example, this allows you to parse jpayne@69: NCBI style FASTA identifiers, and extract the GI number to use jpayne@69: as the dictionary key. jpayne@69: jpayne@69: Note that this dictionary is essentially read only. You cannot jpayne@69: add or change values, pop values, nor clear the dictionary. jpayne@69: """ jpayne@69: jpayne@69: def __init__(self, random_access_proxy, key_function, repr, obj_repr): jpayne@69: """Initialize the class.""" jpayne@69: # Use key_function=None for default value jpayne@69: self._proxy = random_access_proxy jpayne@69: self._key_function = key_function jpayne@69: self._repr = repr jpayne@69: self._obj_repr = obj_repr jpayne@69: self._cached_prev_record = (None, None) # (key, record) jpayne@69: if key_function: jpayne@69: offset_iter = ( jpayne@69: (key_function(key), offset, length) jpayne@69: for (key, offset, length) in random_access_proxy jpayne@69: ) jpayne@69: else: jpayne@69: offset_iter = random_access_proxy jpayne@69: offsets = {} jpayne@69: for key, offset, length in offset_iter: jpayne@69: # Note - we don't store the length because I want to minimise the jpayne@69: # memory requirements. With the SQLite backend the length is kept jpayne@69: # and is used to speed up the get_raw method (by about 3 times). jpayne@69: # The length should be provided by all the current backends except jpayne@69: # SFF where there is an existing Roche index we can reuse (very fast jpayne@69: # but lacks the record lengths) jpayne@69: # assert length or format in ["sff", "sff-trim"], \ jpayne@69: # "%s at offset %i given length %r (%s format %s)" \ jpayne@69: # % (key, offset, length, filename, format) jpayne@69: if key in offsets: jpayne@69: self._proxy._handle.close() jpayne@69: raise ValueError(f"Duplicate key '{key}'") jpayne@69: else: jpayne@69: offsets[key] = offset jpayne@69: self._offsets = offsets jpayne@69: jpayne@69: def __repr__(self): jpayne@69: """Return a string representation of the File object.""" jpayne@69: return self._repr jpayne@69: jpayne@69: def __str__(self): jpayne@69: """Create a string representation of the File object.""" jpayne@69: # TODO - How best to handle the __str__ for SeqIO and SearchIO? jpayne@69: if self: jpayne@69: return f"{{{list(self.keys())[0]!r} : {self._obj_repr}(...), ...}}" jpayne@69: else: jpayne@69: return "{}" jpayne@69: jpayne@69: def __len__(self): jpayne@69: """Return the number of records.""" jpayne@69: return len(self._offsets) jpayne@69: jpayne@69: def __iter__(self): jpayne@69: """Iterate over the keys.""" jpayne@69: return iter(self._offsets) jpayne@69: jpayne@69: def __getitem__(self, key): jpayne@69: """Return record for the specified key. jpayne@69: jpayne@69: As an optimization when repeatedly asked to look up the same record, jpayne@69: the key and record are cached so that if the *same* record is jpayne@69: requested next time, it can be returned without going to disk. jpayne@69: """ jpayne@69: if key == self._cached_prev_record[0]: jpayne@69: return self._cached_prev_record[1] jpayne@69: # Pass the offset to the proxy jpayne@69: record = self._proxy.get(self._offsets[key]) jpayne@69: if self._key_function: jpayne@69: key2 = self._key_function(record.id) jpayne@69: else: jpayne@69: key2 = record.id jpayne@69: if key != key2: jpayne@69: raise ValueError(f"Key did not match ({key} vs {key2})") jpayne@69: self._cached_prev_record = (key, record) jpayne@69: return record jpayne@69: jpayne@69: def get_raw(self, key): jpayne@69: """Return the raw record from the file as a bytes string. jpayne@69: jpayne@69: If the key is not found, a KeyError exception is raised. jpayne@69: """ jpayne@69: # Pass the offset to the proxy jpayne@69: return self._proxy.get_raw(self._offsets[key]) jpayne@69: jpayne@69: def close(self): jpayne@69: """Close the file handle being used to read the data. jpayne@69: jpayne@69: Once called, further use of the index won't work. The sole purpose jpayne@69: of this method is to allow explicit handle closure - for example jpayne@69: if you wish to delete the file, on Windows you must first close jpayne@69: all open handles to that file. jpayne@69: """ jpayne@69: self._proxy._handle.close() jpayne@69: jpayne@69: jpayne@69: class _SQLiteManySeqFilesDict(_IndexedSeqFileDict): jpayne@69: """Read only dictionary interface to many sequential record files. jpayne@69: jpayne@69: This code is used in both Bio.SeqIO for indexing as SeqRecord jpayne@69: objects, and in Bio.SearchIO for indexing QueryResult objects. jpayne@69: jpayne@69: Keeps the keys, file-numbers and offsets in an SQLite database. To access jpayne@69: a record by key, reads from the offset in the appropriate file and then jpayne@69: parses the record into an object. jpayne@69: jpayne@69: There are OS limits on the number of files that can be open at once, jpayne@69: so a pool are kept. If a record is required from a closed file, then jpayne@69: one of the open handles is closed first. jpayne@69: """ jpayne@69: jpayne@69: def __init__( jpayne@69: self, jpayne@69: index_filename, jpayne@69: filenames, jpayne@69: proxy_factory, jpayne@69: fmt, jpayne@69: key_function, jpayne@69: repr, jpayne@69: max_open=10, jpayne@69: ): jpayne@69: """Initialize the class.""" jpayne@69: # TODO? - Don't keep filename list in memory (just in DB)? jpayne@69: # Should save a chunk of memory if dealing with 1000s of files. jpayne@69: # Furthermore could compare a generator to the DB on reloading jpayne@69: # (no need to turn it into a list) jpayne@69: jpayne@69: if sqlite3 is None: jpayne@69: # Python was compiled without sqlite3 support jpayne@69: from Bio import MissingPythonDependencyError jpayne@69: jpayne@69: raise MissingPythonDependencyError( jpayne@69: "Python was compiled without the sqlite3 module" jpayne@69: ) jpayne@69: if filenames is not None: jpayne@69: filenames = list(filenames) # In case it was a generator jpayne@69: jpayne@69: # Cache the arguments as private variables jpayne@69: self._index_filename = index_filename jpayne@69: self._filenames = filenames jpayne@69: self._format = fmt jpayne@69: self._key_function = key_function jpayne@69: self._proxy_factory = proxy_factory jpayne@69: self._repr = repr jpayne@69: self._max_open = max_open jpayne@69: self._proxies = {} jpayne@69: jpayne@69: # Note if using SQLite :memory: trick index filename, this will jpayne@69: # give $PWD as the relative path (which is fine). jpayne@69: self._relative_path = os.path.abspath(os.path.dirname(index_filename)) jpayne@69: jpayne@69: if os.path.isfile(index_filename): jpayne@69: self._load_index() jpayne@69: else: jpayne@69: self._build_index() jpayne@69: jpayne@69: def _load_index(self): jpayne@69: """Call from __init__ to re-use an existing index (PRIVATE).""" jpayne@69: index_filename = self._index_filename jpayne@69: relative_path = self._relative_path jpayne@69: filenames = self._filenames jpayne@69: fmt = self._format jpayne@69: proxy_factory = self._proxy_factory jpayne@69: jpayne@69: con = sqlite3.dbapi2.connect(index_filename, check_same_thread=False) jpayne@69: self._con = con jpayne@69: # Check the count... jpayne@69: try: jpayne@69: (count,) = con.execute( jpayne@69: "SELECT value FROM meta_data WHERE key=?;", ("count",) jpayne@69: ).fetchone() jpayne@69: self._length = int(count) jpayne@69: if self._length == -1: jpayne@69: con.close() jpayne@69: raise ValueError("Unfinished/partial database") from None jpayne@69: jpayne@69: # use MAX(_ROWID_) to obtain the number of sequences in the database jpayne@69: # using COUNT(key) is quite slow in SQLITE jpayne@69: # (https://stackoverflow.com/questions/8988915/sqlite-count-slow-on-big-tables) jpayne@69: (count,) = con.execute("SELECT MAX(_ROWID_) FROM offset_data;").fetchone() jpayne@69: if self._length != int(count): jpayne@69: con.close() jpayne@69: raise ValueError( jpayne@69: "Corrupt database? %i entries not %i" % (int(count), self._length) jpayne@69: ) from None jpayne@69: (self._format,) = con.execute( jpayne@69: "SELECT value FROM meta_data WHERE key=?;", ("format",) jpayne@69: ).fetchone() jpayne@69: if fmt and fmt != self._format: jpayne@69: con.close() jpayne@69: raise ValueError( jpayne@69: f"Index file says format {self._format}, not {fmt}" jpayne@69: ) from None jpayne@69: try: jpayne@69: (filenames_relative_to_index,) = con.execute( jpayne@69: "SELECT value FROM meta_data WHERE key=?;", jpayne@69: ("filenames_relative_to_index",), jpayne@69: ).fetchone() jpayne@69: filenames_relative_to_index = ( jpayne@69: filenames_relative_to_index.upper() == "TRUE" jpayne@69: ) jpayne@69: except TypeError: jpayne@69: # Original behaviour, assume if meta_data missing jpayne@69: filenames_relative_to_index = False jpayne@69: self._filenames = [ jpayne@69: row[0] jpayne@69: for row in con.execute( jpayne@69: "SELECT name FROM file_data ORDER BY file_number;" jpayne@69: ).fetchall() jpayne@69: ] jpayne@69: if filenames_relative_to_index: jpayne@69: # Not implicitly relative to $PWD, explicitly relative to index file jpayne@69: relative_path = os.path.abspath(os.path.dirname(index_filename)) jpayne@69: tmp = [] jpayne@69: for f in self._filenames: jpayne@69: if os.path.isabs(f): jpayne@69: tmp.append(f) jpayne@69: else: jpayne@69: # Would be stored with Unix / path separator, so convert jpayne@69: # it to the local OS path separator here: jpayne@69: tmp.append( jpayne@69: os.path.join(relative_path, f.replace("/", os.path.sep)) jpayne@69: ) jpayne@69: self._filenames = tmp jpayne@69: del tmp jpayne@69: if filenames and len(filenames) != len(self._filenames): jpayne@69: con.close() jpayne@69: raise ValueError( jpayne@69: "Index file says %i files, not %i" jpayne@69: % (len(self._filenames), len(filenames)) jpayne@69: ) from None jpayne@69: if filenames and filenames != self._filenames: jpayne@69: for old, new in zip(self._filenames, filenames): jpayne@69: # Want exact match (after making relative to the index above) jpayne@69: if os.path.abspath(old) != os.path.abspath(new): jpayne@69: con.close() jpayne@69: if filenames_relative_to_index: jpayne@69: raise ValueError( jpayne@69: "Index file has different filenames, e.g. %r != %r" jpayne@69: % (os.path.abspath(old), os.path.abspath(new)) jpayne@69: ) from None jpayne@69: else: jpayne@69: raise ValueError( jpayne@69: "Index file has different filenames " jpayne@69: "[This is an old index where any relative paths " jpayne@69: "were relative to the original working directory]. " jpayne@69: "e.g. %r != %r" jpayne@69: % (os.path.abspath(old), os.path.abspath(new)) jpayne@69: ) from None jpayne@69: # Filenames are equal (after imposing abspath) jpayne@69: except sqlite3.OperationalError as err: jpayne@69: con.close() jpayne@69: raise ValueError(f"Not a Biopython index database? {err}") from None jpayne@69: # Now we have the format (from the DB if not given to us), jpayne@69: if not proxy_factory(self._format): jpayne@69: con.close() jpayne@69: raise ValueError(f"Unsupported format '{self._format}'") jpayne@69: jpayne@69: def _build_index(self): jpayne@69: """Call from __init__ to create a new index (PRIVATE).""" jpayne@69: index_filename = self._index_filename jpayne@69: relative_path = self._relative_path jpayne@69: filenames = self._filenames jpayne@69: fmt = self._format jpayne@69: key_function = self._key_function jpayne@69: proxy_factory = self._proxy_factory jpayne@69: max_open = self._max_open jpayne@69: random_access_proxies = self._proxies jpayne@69: jpayne@69: if not fmt or not filenames: jpayne@69: raise ValueError( jpayne@69: f"Filenames to index and format required to build {index_filename!r}" jpayne@69: ) jpayne@69: if not proxy_factory(fmt): jpayne@69: raise ValueError(f"Unsupported format '{fmt}'") jpayne@69: # Create the index jpayne@69: con = sqlite3.dbapi2.connect(index_filename) jpayne@69: self._con = con jpayne@69: # print("Creating index") jpayne@69: # Sqlite PRAGMA settings for speed jpayne@69: con.execute("PRAGMA synchronous=OFF") jpayne@69: con.execute("PRAGMA locking_mode=EXCLUSIVE") jpayne@69: # Don't index the key column until the end (faster) jpayne@69: # con.execute("CREATE TABLE offset_data (key TEXT PRIMARY KEY, " jpayne@69: # "offset INTEGER);") jpayne@69: con.execute("CREATE TABLE meta_data (key TEXT, value TEXT);") jpayne@69: con.execute("INSERT INTO meta_data (key, value) VALUES (?,?);", ("count", -1)) jpayne@69: con.execute("INSERT INTO meta_data (key, value) VALUES (?,?);", ("format", fmt)) jpayne@69: con.execute( jpayne@69: "INSERT INTO meta_data (key, value) VALUES (?,?);", jpayne@69: ("filenames_relative_to_index", "True"), jpayne@69: ) jpayne@69: # TODO - Record the file size and modified date? jpayne@69: con.execute("CREATE TABLE file_data (file_number INTEGER, name TEXT);") jpayne@69: con.execute( jpayne@69: "CREATE TABLE offset_data (key TEXT, " jpayne@69: "file_number INTEGER, offset INTEGER, length INTEGER);" jpayne@69: ) jpayne@69: count = 0 jpayne@69: for file_index, filename in enumerate(filenames): jpayne@69: # Default to storing as an absolute path, jpayne@69: f = os.path.abspath(filename) jpayne@69: if not os.path.isabs(filename) and not os.path.isabs(index_filename): jpayne@69: # Since user gave BOTH filename & index as relative paths, jpayne@69: # we will store this relative to the index file even though jpayne@69: # if it may now start ../ (meaning up a level) jpayne@69: # Note for cross platform use (e.g. shared drive over SAMBA), jpayne@69: # convert any Windows slash into Unix style for rel paths. jpayne@69: f = os.path.relpath(filename, relative_path).replace(os.path.sep, "/") jpayne@69: elif (os.path.dirname(os.path.abspath(filename)) + os.path.sep).startswith( jpayne@69: relative_path + os.path.sep jpayne@69: ): jpayne@69: # Since sequence file is in same directory or sub directory, jpayne@69: # might as well make this into a relative path: jpayne@69: f = os.path.relpath(filename, relative_path).replace(os.path.sep, "/") jpayne@69: assert not f.startswith("../"), f jpayne@69: # print("DEBUG - storing %r as [%r] %r" % (filename, relative_path, f)) jpayne@69: con.execute( jpayne@69: "INSERT INTO file_data (file_number, name) VALUES (?,?);", jpayne@69: (file_index, f), jpayne@69: ) jpayne@69: random_access_proxy = proxy_factory(fmt, filename) jpayne@69: if key_function: jpayne@69: offset_iter = ( jpayne@69: (key_function(key), file_index, offset, length) jpayne@69: for (key, offset, length) in random_access_proxy jpayne@69: ) jpayne@69: else: jpayne@69: offset_iter = ( jpayne@69: (key, file_index, offset, length) jpayne@69: for (key, offset, length) in random_access_proxy jpayne@69: ) jpayne@69: while True: jpayne@69: batch = list(itertools.islice(offset_iter, 100)) jpayne@69: if not batch: jpayne@69: break jpayne@69: # print("Inserting batch of %i offsets, %s ... %s" jpayne@69: # % (len(batch), batch[0][0], batch[-1][0])) jpayne@69: con.executemany( jpayne@69: "INSERT INTO offset_data (key,file_number,offset,length) VALUES (?,?,?,?);", jpayne@69: batch, jpayne@69: ) jpayne@69: con.commit() jpayne@69: count += len(batch) jpayne@69: if len(random_access_proxies) < max_open: jpayne@69: random_access_proxies[file_index] = random_access_proxy jpayne@69: else: jpayne@69: random_access_proxy._handle.close() jpayne@69: self._length = count jpayne@69: # print("About to index %i entries" % count) jpayne@69: try: jpayne@69: con.execute( jpayne@69: "CREATE UNIQUE INDEX IF NOT EXISTS key_index ON offset_data(key);" jpayne@69: ) jpayne@69: except sqlite3.IntegrityError as err: jpayne@69: self._proxies = random_access_proxies jpayne@69: self.close() jpayne@69: con.close() jpayne@69: raise ValueError(f"Duplicate key? {err}") from None jpayne@69: con.execute("PRAGMA locking_mode=NORMAL") jpayne@69: con.execute("UPDATE meta_data SET value = ? WHERE key = ?;", (count, "count")) jpayne@69: con.commit() jpayne@69: # print("Index created") jpayne@69: jpayne@69: def __repr__(self): jpayne@69: return self._repr jpayne@69: jpayne@69: def __contains__(self, key): jpayne@69: return bool( jpayne@69: self._con.execute( jpayne@69: "SELECT key FROM offset_data WHERE key=?;", (key,) jpayne@69: ).fetchone() jpayne@69: ) jpayne@69: jpayne@69: def __len__(self): jpayne@69: """Return the number of records indexed.""" jpayne@69: return self._length jpayne@69: # return self._con.execute("SELECT COUNT(key) FROM offset_data;").fetchone()[0] jpayne@69: jpayne@69: def __iter__(self): jpayne@69: """Iterate over the keys.""" jpayne@69: for row in self._con.execute( jpayne@69: "SELECT key FROM offset_data ORDER BY file_number, offset;" jpayne@69: ): jpayne@69: yield str(row[0]) jpayne@69: jpayne@69: def __getitem__(self, key): jpayne@69: """Return record for the specified key.""" jpayne@69: # Pass the offset to the proxy jpayne@69: row = self._con.execute( jpayne@69: "SELECT file_number, offset FROM offset_data WHERE key=?;", (key,) jpayne@69: ).fetchone() jpayne@69: if not row: jpayne@69: raise KeyError jpayne@69: file_number, offset = row jpayne@69: proxies = self._proxies jpayne@69: if file_number in proxies: jpayne@69: record = proxies[file_number].get(offset) jpayne@69: else: jpayne@69: if len(proxies) >= self._max_open: jpayne@69: # Close an old handle... jpayne@69: proxies.popitem()[1]._handle.close() jpayne@69: # Open a new handle... jpayne@69: proxy = self._proxy_factory(self._format, self._filenames[file_number]) jpayne@69: record = proxy.get(offset) jpayne@69: proxies[file_number] = proxy jpayne@69: if self._key_function: jpayne@69: key2 = self._key_function(record.id) jpayne@69: else: jpayne@69: key2 = record.id jpayne@69: if key != key2: jpayne@69: raise ValueError(f"Key did not match ({key} vs {key2})") jpayne@69: return record jpayne@69: jpayne@69: def get_raw(self, key): jpayne@69: """Return the raw record from the file as a bytes string. jpayne@69: jpayne@69: If the key is not found, a KeyError exception is raised. jpayne@69: """ jpayne@69: # Pass the offset to the proxy jpayne@69: row = self._con.execute( jpayne@69: "SELECT file_number, offset, length FROM offset_data WHERE key=?;", (key,) jpayne@69: ).fetchone() jpayne@69: if not row: jpayne@69: raise KeyError jpayne@69: file_number, offset, length = row jpayne@69: proxies = self._proxies jpayne@69: if file_number in proxies: jpayne@69: if length: jpayne@69: # Shortcut if we have the length jpayne@69: h = proxies[file_number]._handle jpayne@69: h.seek(offset) jpayne@69: return h.read(length) jpayne@69: else: jpayne@69: return proxies[file_number].get_raw(offset) jpayne@69: else: jpayne@69: # This code is duplicated from __getitem__ to avoid a function call jpayne@69: if len(proxies) >= self._max_open: jpayne@69: # Close an old handle... jpayne@69: proxies.popitem()[1]._handle.close() jpayne@69: # Open a new handle... jpayne@69: proxy = self._proxy_factory(self._format, self._filenames[file_number]) jpayne@69: proxies[file_number] = proxy jpayne@69: if length: jpayne@69: # Shortcut if we have the length jpayne@69: h = proxy._handle jpayne@69: h.seek(offset) jpayne@69: return h.read(length) jpayne@69: else: jpayne@69: return proxy.get_raw(offset) jpayne@69: jpayne@69: def close(self): jpayne@69: """Close any open file handles.""" jpayne@69: proxies = self._proxies jpayne@69: while proxies: jpayne@69: proxies.popitem()[1]._handle.close()