  1. """A dumb and slow but simple dbm clone.
  2. For database spam, spam.dir contains the index (a text file),
  3. spam.bak *may* contain a backup of the index (also a text file),
  4. while spam.dat contains the data (a binary file).
  5. XXX TO DO:
  6. - seems to contain a bug when updating...
  7. - reclaim free space (currently, space once occupied by deleted or expanded
  8. items is never reused)
  9. - support concurrent access (currently, if two processes take turns making
  10. updates, they can mess up the index)
  11. - support efficient access to large databases (currently, the whole index
  12. is read when the database is opened, and some updates rewrite the whole index)
  13. - support opening for read-only (flag = 'm')
  14. """
import ast as _ast
import io as _io
import os as _os
import collections.abc

__all__ = ["error", "open"]

_BLOCKSIZE = 512

error = OSError

class _Database(collections.abc.MutableMapping):

    # The on-disk directory and data files can remain in mutually
    # inconsistent states for an arbitrarily long time (see comments
    # at the end of __setitem__).  This is only repaired when _commit()
    # gets called.  One place _commit() gets called is from __del__(),
    # and if that occurs at program shutdown time, module globals may
    # already have gotten rebound to None.  Since it's crucial that
    # _commit() finish successfully, we can't ignore shutdown races
    # here, and _commit() must not reference any globals.
    _os = _os       # for _commit()
    _io = _io       # for _commit()

    def __init__(self, filebasename, mode, flag='c'):
        self._mode = mode
        self._readonly = (flag == 'r')

        # The directory file is a text file.  Each line looks like
        #    "%r, (%d, %d)\n" % (key, pos, siz)
        # where key is the string key, pos is the offset into the dat
        # file of the associated value's first byte, and siz is the number
        # of bytes in the associated value.
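        # For example, a 3-byte value stored at data-file offset 512 under
        # the key b'spam' is recorded as the line:
        #     'spam', (512, 3)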
        self._dirfile = filebasename + '.dir'

        # The data file is a binary file pointed into by the directory
        # file, and holds the values associated with keys.  Each value
        # begins at a _BLOCKSIZE-aligned byte offset, and is a raw
        # binary 8-bit string value.
        self._datfile = filebasename + '.dat'
        self._bakfile = filebasename + '.bak'

        # The index is an in-memory dict, mirroring the directory file.
        self._index = None  # maps keys to (pos, siz) pairs

        # Handle the creation
        self._create(flag)
        self._update(flag)

    def _create(self, flag):
        if flag == 'n':
            for filename in (self._datfile, self._bakfile, self._dirfile):
                try:
                    _os.remove(filename)
                except OSError:
                    pass
        # Mod by Jack: create data file if needed
        try:
            f = _io.open(self._datfile, 'r', encoding="Latin-1")
        except OSError:
            if flag not in ('c', 'n'):
                raise
            with _io.open(self._datfile, 'w', encoding="Latin-1") as f:
                self._chmod(self._datfile)
        else:
            f.close()

    # Read directory file into the in-memory index dict.
    def _update(self, flag):
        self._modified = False
        self._index = {}
        try:
            f = _io.open(self._dirfile, 'r', encoding="Latin-1")
        except OSError:
            if flag not in ('c', 'n'):
                raise
            self._modified = True
        else:
            with f:
                for line in f:
                    line = line.rstrip()
                    key, pos_and_siz_pair = _ast.literal_eval(line)
                    key = key.encode('Latin-1')
                    self._index[key] = pos_and_siz_pair

    # Write the index dict to the directory file.  The original directory
    # file (if any) is renamed with a .bak extension first.  If a .bak
    # file currently exists, it's deleted.
    def _commit(self):
        # CAUTION:  It's vital that _commit() succeed, and _commit() can
        # be called from __del__().  Therefore we must never reference a
        # global in this routine.
        if self._index is None or not self._modified:
            return  # nothing to do

        try:
            self._os.unlink(self._bakfile)
        except OSError:
            pass

        try:
            self._os.rename(self._dirfile, self._bakfile)
        except OSError:
            pass

        with self._io.open(self._dirfile, 'w', encoding="Latin-1") as f:
            self._chmod(self._dirfile)
            for key, pos_and_siz_pair in self._index.items():
                # Use Latin-1 since it has no qualms with any value in any
                # position; UTF-8, though, does care sometimes.
                entry = "%r, %r\n" % (key.decode('Latin-1'), pos_and_siz_pair)
                f.write(entry)

    sync = _commit

    def _verify_open(self):
        if self._index is None:
            raise error('DBM object has already been closed')

    def __getitem__(self, key):
        if isinstance(key, str):
            key = key.encode('utf-8')
        self._verify_open()
        pos, siz = self._index[key]     # may raise KeyError
        with _io.open(self._datfile, 'rb') as f:
            f.seek(pos)
            dat = f.read(siz)
        return dat

    # Append val to the data file, starting at a _BLOCKSIZE-aligned
    # offset.  The data file is first padded with NUL bytes (if needed)
    # to get to an aligned offset.  Return pair
    #     (starting offset of val, len(val))
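    # For example, with _BLOCKSIZE = 512, a data file that currently ends at
    # offset 700 is padded with NUL bytes up to offset 1024, and the new
    # value is written there, so the returned offset is 1024.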
    def _addval(self, val):
        with _io.open(self._datfile, 'rb+') as f:
            f.seek(0, 2)
            pos = int(f.tell())
            npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
            f.write(b'\0'*(npos-pos))
            pos = npos
            f.write(val)
        return (pos, len(val))

    # Write val to the data file, starting at offset pos.  The caller
    # is responsible for ensuring that there's enough room starting at
    # pos to hold val, without overwriting some other value.  Return
    # pair (pos, len(val)).
    def _setval(self, pos, val):
        with _io.open(self._datfile, 'rb+') as f:
            f.seek(pos)
            f.write(val)
        return (pos, len(val))

    # key is a new key whose associated value starts in the data file
    # at offset pos and with length siz.  Add an index record to
    # the in-memory index dict, and append one to the directory file.
    def _addkey(self, key, pos_and_siz_pair):
        self._index[key] = pos_and_siz_pair
        with _io.open(self._dirfile, 'a', encoding="Latin-1") as f:
            self._chmod(self._dirfile)
            f.write("%r, %r\n" % (key.decode("Latin-1"), pos_and_siz_pair))

    def __setitem__(self, key, val):
        if self._readonly:
            raise error('The database is opened for reading only')
        if isinstance(key, str):
            key = key.encode('utf-8')
        elif not isinstance(key, (bytes, bytearray)):
            raise TypeError("keys must be bytes or strings")
        if isinstance(val, str):
            val = val.encode('utf-8')
        elif not isinstance(val, (bytes, bytearray)):
            raise TypeError("values must be bytes or strings")
        self._verify_open()
        self._modified = True
        if key not in self._index:
            self._addkey(key, self._addval(val))
        else:
            # See whether the new value is small enough to fit in the
            # (padded) space currently occupied by the old value.
            pos, siz = self._index[key]
            oldblocks = (siz + _BLOCKSIZE - 1) // _BLOCKSIZE
            newblocks = (len(val) + _BLOCKSIZE - 1) // _BLOCKSIZE
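            # For example, a 700-byte old value occupies two 512-byte blocks,
            # so a new value of up to 1024 bytes is rewritten in place below,
            # while anything larger is appended to the end of the data file.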
            if newblocks <= oldblocks:
                self._index[key] = self._setval(pos, val)
            else:
                # The new value doesn't fit in the (padded) space used
                # by the old value.  The blocks used by the old value are
                # forever lost.
                self._index[key] = self._addval(val)

            # Note that _index may be out of synch with the directory
            # file now:  _setval() and _addval() don't update the directory
            # file.  This also means that the on-disk directory and data
            # files are in a mutually inconsistent state, and they'll
            # remain that way until _commit() is called.  Note that this
            # is a disaster (for the database) if the program crashes
            # (so that _commit() never gets called).

    def __delitem__(self, key):
        if self._readonly:
            raise error('The database is opened for reading only')
        if isinstance(key, str):
            key = key.encode('utf-8')
        self._verify_open()
        self._modified = True
        # The blocks used by the associated value are lost.
        del self._index[key]
        # XXX It's unclear why we do a _commit() here (the code always
        # XXX has, so I'm not changing it).  __setitem__ doesn't try to
        # XXX keep the directory file in synch.  Why should we?  Or
        # XXX why shouldn't __setitem__?
        self._commit()

    def keys(self):
        try:
            return list(self._index)
        except TypeError:
            raise error('DBM object has already been closed') from None

    def items(self):
        self._verify_open()
        return [(key, self[key]) for key in self._index.keys()]

    def __contains__(self, key):
        if isinstance(key, str):
            key = key.encode('utf-8')
        try:
            return key in self._index
        except TypeError:
            if self._index is None:
                raise error('DBM object has already been closed') from None
            else:
                raise

    def iterkeys(self):
        try:
            return iter(self._index)
        except TypeError:
            raise error('DBM object has already been closed') from None
    __iter__ = iterkeys

    def __len__(self):
        try:
            return len(self._index)
        except TypeError:
            raise error('DBM object has already been closed') from None

    def close(self):
        try:
            self._commit()
        finally:
            self._index = self._datfile = self._dirfile = self._bakfile = None

    __del__ = close

    def _chmod(self, file):
        self._os.chmod(file, self._mode)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()


def open(file, flag='c', mode=0o666):
    """Open the database file, filename, and return corresponding object.

    The flag argument, used to control how the database is opened in the
    other DBM implementations, supports only the semantics of the 'c' and
    'n' values.  Other values default to the semantics of the 'c' value:
    the database is always opened for update and is created if it does
    not exist.

    The optional mode argument is the UNIX mode of the file, used only when
    the database has to be created.  It defaults to octal code 0o666 (and
    will be modified by the prevailing umask).

    """

    # Modify mode depending on the umask
    try:
        um = _os.umask(0)
        _os.umask(um)
    except AttributeError:
        pass
    else:
        # Turn off any bits that are set in the umask
        mode = mode & (~um)
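        # For example, with the default mode 0o666 and a umask of 0o022,
        # newly created files end up with mode 0o644.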
    if flag not in ('r', 'w', 'c', 'n'):
        raise ValueError("Flag must be one of 'r', 'w', 'c', or 'n'")

    return _Database(file, mode, flag=flag)
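

# A minimal usage sketch (illustrative only; the database name 'spam' and the
# stored keys/values are arbitrary).  Keys and values may be str or bytes;
# str is encoded as UTF-8 on the way in, and values always come back as bytes.
if __name__ == '__main__':
    with open('spam', 'c') as db:       # creates spam.dir and spam.dat
        db['hello'] = 'world'           # stored as b'world'
        db[b'answer'] = b'42'
        print(db[b'hello'])             # b'world'
        print(sorted(db.keys()))        # [b'answer', b'hello']
    # close() has run, so _commit() has flushed the index to spam.dir.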