sql.py 74 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298
  1. """
  2. Collection of query wrappers / abstractions to both facilitate data
  3. retrieval and to reduce dependency on DB-specific API.
  4. """
  5. from __future__ import annotations
  6. from contextlib import contextmanager
  7. from datetime import (
  8. date,
  9. datetime,
  10. time,
  11. )
  12. from functools import partial
  13. import re
  14. from typing import (
  15. Any,
  16. Iterator,
  17. Sequence,
  18. cast,
  19. overload,
  20. )
  21. import warnings
  22. import numpy as np
  23. import pandas._libs.lib as lib
  24. from pandas._typing import DtypeArg
  25. from pandas.compat._optional import import_optional_dependency
  26. from pandas.errors import AbstractMethodError
  27. from pandas.core.dtypes.common import (
  28. is_datetime64tz_dtype,
  29. is_dict_like,
  30. is_list_like,
  31. )
  32. from pandas.core.dtypes.dtypes import DatetimeTZDtype
  33. from pandas.core.dtypes.missing import isna
  34. from pandas import get_option
  35. from pandas.core.api import (
  36. DataFrame,
  37. Series,
  38. )
  39. from pandas.core.base import PandasObject
  40. from pandas.core.tools.datetimes import to_datetime
  41. from pandas.util.version import Version
  42. class SQLAlchemyRequired(ImportError):
  43. pass
  44. class DatabaseError(IOError):
  45. pass
  46. # -----------------------------------------------------------------------------
  47. # -- Helper functions
  48. _SQLALCHEMY_INSTALLED: bool | None = None
  49. def _is_sqlalchemy_connectable(con):
  50. global _SQLALCHEMY_INSTALLED
  51. if _SQLALCHEMY_INSTALLED is None:
  52. try:
  53. import sqlalchemy
  54. _SQLALCHEMY_INSTALLED = True
  55. except ImportError:
  56. _SQLALCHEMY_INSTALLED = False
  57. if _SQLALCHEMY_INSTALLED:
  58. import sqlalchemy # noqa: F811
  59. return isinstance(con, sqlalchemy.engine.Connectable)
  60. else:
  61. return False
  62. def _gt14() -> bool:
  63. """
  64. Check if sqlalchemy.__version__ is at least 1.4.0, when several
  65. deprecations were made.
  66. """
  67. import sqlalchemy
  68. return Version(sqlalchemy.__version__) >= Version("1.4.0")
  69. def _convert_params(sql, params):
  70. """Convert SQL and params args to DBAPI2.0 compliant format."""
  71. args = [sql]
  72. if params is not None:
  73. if hasattr(params, "keys"): # test if params is a mapping
  74. args += [params]
  75. else:
  76. args += [list(params)]
  77. return args
  78. def _process_parse_dates_argument(parse_dates):
  79. """Process parse_dates argument for read_sql functions"""
  80. # handle non-list entries for parse_dates gracefully
  81. if parse_dates is True or parse_dates is None or parse_dates is False:
  82. parse_dates = []
  83. elif not hasattr(parse_dates, "__iter__"):
  84. parse_dates = [parse_dates]
  85. return parse_dates
  86. def _handle_date_column(
  87. col, utc: bool | None = None, format: str | dict[str, Any] | None = None
  88. ):
  89. if isinstance(format, dict):
  90. # GH35185 Allow custom error values in parse_dates argument of
  91. # read_sql like functions.
  92. # Format can take on custom to_datetime argument values such as
  93. # {"errors": "coerce"} or {"dayfirst": True}
  94. error = format.pop("errors", None) or "ignore"
  95. return to_datetime(col, errors=error, **format)
  96. else:
  97. # Allow passing of formatting string for integers
  98. # GH17855
  99. if format is None and (
  100. issubclass(col.dtype.type, np.floating)
  101. or issubclass(col.dtype.type, np.integer)
  102. ):
  103. format = "s"
  104. if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]:
  105. return to_datetime(col, errors="coerce", unit=format, utc=utc)
  106. elif is_datetime64tz_dtype(col.dtype):
  107. # coerce to UTC timezone
  108. # GH11216
  109. return to_datetime(col, utc=True)
  110. else:
  111. return to_datetime(col, errors="coerce", format=format, utc=utc)
  112. def _parse_date_columns(data_frame, parse_dates):
  113. """
  114. Force non-datetime columns to be read as such.
  115. Supports both string formatted and integer timestamp columns.
  116. """
  117. parse_dates = _process_parse_dates_argument(parse_dates)
  118. # we want to coerce datetime64_tz dtypes for now to UTC
  119. # we could in theory do a 'nice' conversion from a FixedOffset tz
  120. # GH11216
  121. for col_name, df_col in data_frame.items():
  122. if is_datetime64tz_dtype(df_col.dtype) or col_name in parse_dates:
  123. try:
  124. fmt = parse_dates[col_name]
  125. except TypeError:
  126. fmt = None
  127. data_frame[col_name] = _handle_date_column(df_col, format=fmt)
  128. return data_frame
  129. def _wrap_result(
  130. data,
  131. columns,
  132. index_col=None,
  133. coerce_float: bool = True,
  134. parse_dates=None,
  135. dtype: DtypeArg | None = None,
  136. ):
  137. """Wrap result set of query in a DataFrame."""
  138. frame = DataFrame.from_records(data, columns=columns, coerce_float=coerce_float)
  139. if dtype:
  140. frame = frame.astype(dtype)
  141. frame = _parse_date_columns(frame, parse_dates)
  142. if index_col is not None:
  143. frame.set_index(index_col, inplace=True)
  144. return frame
  145. def execute(sql, con, cur=None, params=None):
  146. """
  147. Execute the given SQL query using the provided connection object.
  148. Parameters
  149. ----------
  150. sql : string
  151. SQL query to be executed.
  152. con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
  153. Using SQLAlchemy makes it possible to use any DB supported by the
  154. library.
  155. If a DBAPI2 object, only sqlite3 is supported.
  156. cur : deprecated, cursor is obtained from connection, default: None
  157. params : list or tuple, optional, default: None
  158. List of parameters to pass to execute method.
  159. Returns
  160. -------
  161. Results Iterable
  162. """
  163. if cur is None:
  164. pandas_sql = pandasSQL_builder(con)
  165. else:
  166. pandas_sql = pandasSQL_builder(cur, is_cursor=True)
  167. args = _convert_params(sql, params)
  168. return pandas_sql.execute(*args)
  169. # -----------------------------------------------------------------------------
  170. # -- Read and write to DataFrames
  171. @overload
  172. def read_sql_table(
  173. table_name,
  174. con,
  175. schema=None,
  176. index_col=None,
  177. coerce_float=True,
  178. parse_dates=None,
  179. columns=None,
  180. chunksize: None = None,
  181. ) -> DataFrame:
  182. ...
  183. @overload
  184. def read_sql_table(
  185. table_name,
  186. con,
  187. schema=None,
  188. index_col=None,
  189. coerce_float=True,
  190. parse_dates=None,
  191. columns=None,
  192. chunksize: int = 1,
  193. ) -> Iterator[DataFrame]:
  194. ...
  195. def read_sql_table(
  196. table_name: str,
  197. con,
  198. schema: str | None = None,
  199. index_col: str | Sequence[str] | None = None,
  200. coerce_float: bool = True,
  201. parse_dates=None,
  202. columns=None,
  203. chunksize: int | None = None,
  204. ) -> DataFrame | Iterator[DataFrame]:
  205. """
  206. Read SQL database table into a DataFrame.
  207. Given a table name and a SQLAlchemy connectable, returns a DataFrame.
  208. This function does not support DBAPI connections.
  209. Parameters
  210. ----------
  211. table_name : str
  212. Name of SQL table in database.
  213. con : SQLAlchemy connectable or str
  214. A database URI could be provided as str.
  215. SQLite DBAPI connection mode not supported.
  216. schema : str, default None
  217. Name of SQL schema in database to query (if database flavor
  218. supports this). Uses default schema if None (default).
  219. index_col : str or list of str, optional, default: None
  220. Column(s) to set as index(MultiIndex).
  221. coerce_float : bool, default True
  222. Attempts to convert values of non-string, non-numeric objects (like
  223. decimal.Decimal) to floating point. Can result in loss of Precision.
  224. parse_dates : list or dict, default None
  225. - List of column names to parse as dates.
  226. - Dict of ``{column_name: format string}`` where format string is
  227. strftime compatible in case of parsing string times or is one of
  228. (D, s, ns, ms, us) in case of parsing integer timestamps.
  229. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
  230. to the keyword arguments of :func:`pandas.to_datetime`
  231. Especially useful with databases without native Datetime support,
  232. such as SQLite.
  233. columns : list, default None
  234. List of column names to select from SQL table.
  235. chunksize : int, default None
  236. If specified, returns an iterator where `chunksize` is the number of
  237. rows to include in each chunk.
  238. Returns
  239. -------
  240. DataFrame or Iterator[DataFrame]
  241. A SQL table is returned as two-dimensional data structure with labeled
  242. axes.
  243. See Also
  244. --------
  245. read_sql_query : Read SQL query into a DataFrame.
  246. read_sql : Read SQL query or database table into a DataFrame.
  247. Notes
  248. -----
  249. Any datetime values with time zone information will be converted to UTC.
  250. Examples
  251. --------
  252. >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
  253. """
  254. con = _engine_builder(con)
  255. if not _is_sqlalchemy_connectable(con):
  256. raise NotImplementedError(
  257. "read_sql_table only supported for SQLAlchemy connectable."
  258. )
  259. import sqlalchemy
  260. from sqlalchemy.schema import MetaData
  261. meta = MetaData(con, schema=schema)
  262. try:
  263. meta.reflect(only=[table_name], views=True)
  264. except sqlalchemy.exc.InvalidRequestError as err:
  265. raise ValueError(f"Table {table_name} not found") from err
  266. pandas_sql = SQLDatabase(con, meta=meta)
  267. table = pandas_sql.read_table(
  268. table_name,
  269. index_col=index_col,
  270. coerce_float=coerce_float,
  271. parse_dates=parse_dates,
  272. columns=columns,
  273. chunksize=chunksize,
  274. )
  275. if table is not None:
  276. return table
  277. else:
  278. raise ValueError(f"Table {table_name} not found", con)
  279. @overload
  280. def read_sql_query(
  281. sql,
  282. con,
  283. index_col=None,
  284. coerce_float=True,
  285. params=None,
  286. parse_dates=None,
  287. chunksize: None = None,
  288. dtype: DtypeArg | None = None,
  289. ) -> DataFrame:
  290. ...
  291. @overload
  292. def read_sql_query(
  293. sql,
  294. con,
  295. index_col=None,
  296. coerce_float=True,
  297. params=None,
  298. parse_dates=None,
  299. chunksize: int = 1,
  300. dtype: DtypeArg | None = None,
  301. ) -> Iterator[DataFrame]:
  302. ...
  303. def read_sql_query(
  304. sql,
  305. con,
  306. index_col=None,
  307. coerce_float: bool = True,
  308. params=None,
  309. parse_dates=None,
  310. chunksize: int | None = None,
  311. dtype: DtypeArg | None = None,
  312. ) -> DataFrame | Iterator[DataFrame]:
  313. """
  314. Read SQL query into a DataFrame.
  315. Returns a DataFrame corresponding to the result set of the query
  316. string. Optionally provide an `index_col` parameter to use one of the
  317. columns as the index, otherwise default integer index will be used.
  318. Parameters
  319. ----------
  320. sql : str SQL query or SQLAlchemy Selectable (select or text object)
  321. SQL query to be executed.
  322. con : SQLAlchemy connectable, str, or sqlite3 connection
  323. Using SQLAlchemy makes it possible to use any DB supported by that
  324. library. If a DBAPI2 object, only sqlite3 is supported.
  325. index_col : str or list of str, optional, default: None
  326. Column(s) to set as index(MultiIndex).
  327. coerce_float : bool, default True
  328. Attempts to convert values of non-string, non-numeric objects (like
  329. decimal.Decimal) to floating point. Useful for SQL result sets.
  330. params : list, tuple or dict, optional, default: None
  331. List of parameters to pass to execute method. The syntax used
  332. to pass parameters is database driver dependent. Check your
  333. database driver documentation for which of the five syntax styles,
  334. described in PEP 249's paramstyle, is supported.
  335. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
  336. parse_dates : list or dict, default: None
  337. - List of column names to parse as dates.
  338. - Dict of ``{column_name: format string}`` where format string is
  339. strftime compatible in case of parsing string times, or is one of
  340. (D, s, ns, ms, us) in case of parsing integer timestamps.
  341. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
  342. to the keyword arguments of :func:`pandas.to_datetime`
  343. Especially useful with databases without native Datetime support,
  344. such as SQLite.
  345. chunksize : int, default None
  346. If specified, return an iterator where `chunksize` is the number of
  347. rows to include in each chunk.
  348. dtype : Type name or dict of columns
  349. Data type for data or columns. E.g. np.float64 or
  350. {‘a’: np.float64, ‘b’: np.int32, ‘c’: ‘Int64’}
  351. .. versionadded:: 1.3.0
  352. Returns
  353. -------
  354. DataFrame or Iterator[DataFrame]
  355. See Also
  356. --------
  357. read_sql_table : Read SQL database table into a DataFrame.
  358. read_sql : Read SQL query or database table into a DataFrame.
  359. Notes
  360. -----
  361. Any datetime values with time zone information parsed via the `parse_dates`
  362. parameter will be converted to UTC.
  363. """
  364. pandas_sql = pandasSQL_builder(con)
  365. return pandas_sql.read_query(
  366. sql,
  367. index_col=index_col,
  368. params=params,
  369. coerce_float=coerce_float,
  370. parse_dates=parse_dates,
  371. chunksize=chunksize,
  372. dtype=dtype,
  373. )
  374. @overload
  375. def read_sql(
  376. sql,
  377. con,
  378. index_col=None,
  379. coerce_float=True,
  380. params=None,
  381. parse_dates=None,
  382. columns=None,
  383. chunksize: None = None,
  384. ) -> DataFrame:
  385. ...
  386. @overload
  387. def read_sql(
  388. sql,
  389. con,
  390. index_col=None,
  391. coerce_float=True,
  392. params=None,
  393. parse_dates=None,
  394. columns=None,
  395. chunksize: int = 1,
  396. ) -> Iterator[DataFrame]:
  397. ...
  398. def read_sql(
  399. sql,
  400. con,
  401. index_col: str | Sequence[str] | None = None,
  402. coerce_float: bool = True,
  403. params=None,
  404. parse_dates=None,
  405. columns=None,
  406. chunksize: int | None = None,
  407. ) -> DataFrame | Iterator[DataFrame]:
  408. """
  409. Read SQL query or database table into a DataFrame.
  410. This function is a convenience wrapper around ``read_sql_table`` and
  411. ``read_sql_query`` (for backward compatibility). It will delegate
  412. to the specific function depending on the provided input. A SQL query
  413. will be routed to ``read_sql_query``, while a database table name will
  414. be routed to ``read_sql_table``. Note that the delegated function might
  415. have more specific notes about their functionality not listed here.
  416. Parameters
  417. ----------
  418. sql : str or SQLAlchemy Selectable (select or text object)
  419. SQL query to be executed or a table name.
  420. con : SQLAlchemy connectable, str, or sqlite3 connection
  421. Using SQLAlchemy makes it possible to use any DB supported by that
  422. library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
  423. for engine disposal and connection closure for the SQLAlchemy connectable; str
  424. connections are closed automatically. See
  425. `here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
  426. index_col : str or list of str, optional, default: None
  427. Column(s) to set as index(MultiIndex).
  428. coerce_float : bool, default True
  429. Attempts to convert values of non-string, non-numeric objects (like
  430. decimal.Decimal) to floating point, useful for SQL result sets.
  431. params : list, tuple or dict, optional, default: None
  432. List of parameters to pass to execute method. The syntax used
  433. to pass parameters is database driver dependent. Check your
  434. database driver documentation for which of the five syntax styles,
  435. described in PEP 249's paramstyle, is supported.
  436. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
  437. parse_dates : list or dict, default: None
  438. - List of column names to parse as dates.
  439. - Dict of ``{column_name: format string}`` where format string is
  440. strftime compatible in case of parsing string times, or is one of
  441. (D, s, ns, ms, us) in case of parsing integer timestamps.
  442. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
  443. to the keyword arguments of :func:`pandas.to_datetime`
  444. Especially useful with databases without native Datetime support,
  445. such as SQLite.
  446. columns : list, default: None
  447. List of column names to select from SQL table (only used when reading
  448. a table).
  449. chunksize : int, default None
  450. If specified, return an iterator where `chunksize` is the
  451. number of rows to include in each chunk.
  452. Returns
  453. -------
  454. DataFrame or Iterator[DataFrame]
  455. See Also
  456. --------
  457. read_sql_table : Read SQL database table into a DataFrame.
  458. read_sql_query : Read SQL query into a DataFrame.
  459. Examples
  460. --------
  461. Read data from SQL via either a SQL query or a SQL tablename.
  462. When using a SQLite database only SQL queries are accepted,
  463. providing only the SQL tablename will result in an error.
  464. >>> from sqlite3 import connect
  465. >>> conn = connect(':memory:')
  466. >>> df = pd.DataFrame(data=[[0, '10/11/12'], [1, '12/11/10']],
  467. ... columns=['int_column', 'date_column'])
  468. >>> df.to_sql('test_data', conn)
  469. >>> pd.read_sql('SELECT int_column, date_column FROM test_data', conn)
  470. int_column date_column
  471. 0 0 10/11/12
  472. 1 1 12/11/10
  473. >>> pd.read_sql('test_data', 'postgres:///db_name') # doctest:+SKIP
  474. Apply date parsing to columns through the ``parse_dates`` argument
  475. >>> pd.read_sql('SELECT int_column, date_column FROM test_data',
  476. ... conn,
  477. ... parse_dates=["date_column"])
  478. int_column date_column
  479. 0 0 2012-10-11
  480. 1 1 2010-12-11
  481. The ``parse_dates`` argument calls ``pd.to_datetime`` on the provided columns.
  482. Custom argument values for applying ``pd.to_datetime`` on a column are specified
  483. via a dictionary format:
  484. 1. Ignore errors while parsing the values of "date_column"
  485. >>> pd.read_sql('SELECT int_column, date_column FROM test_data',
  486. ... conn,
  487. ... parse_dates={"date_column": {"errors": "ignore"}})
  488. int_column date_column
  489. 0 0 2012-10-11
  490. 1 1 2010-12-11
  491. 2. Apply a dayfirst date parsing order on the values of "date_column"
  492. >>> pd.read_sql('SELECT int_column, date_column FROM test_data',
  493. ... conn,
  494. ... parse_dates={"date_column": {"dayfirst": True}})
  495. int_column date_column
  496. 0 0 2012-11-10
  497. 1 1 2010-11-12
  498. 3. Apply custom formatting when date parsing the values of "date_column"
  499. >>> pd.read_sql('SELECT int_column, date_column FROM test_data',
  500. ... conn,
  501. ... parse_dates={"date_column": {"format": "%d/%m/%y"}})
  502. int_column date_column
  503. 0 0 2012-11-10
  504. 1 1 2010-11-12
  505. """
  506. pandas_sql = pandasSQL_builder(con)
  507. if isinstance(pandas_sql, SQLiteDatabase):
  508. return pandas_sql.read_query(
  509. sql,
  510. index_col=index_col,
  511. params=params,
  512. coerce_float=coerce_float,
  513. parse_dates=parse_dates,
  514. chunksize=chunksize,
  515. )
  516. try:
  517. _is_table_name = pandas_sql.has_table(sql)
  518. except Exception:
  519. # using generic exception to catch errors from sql drivers (GH24988)
  520. _is_table_name = False
  521. if _is_table_name:
  522. pandas_sql.meta.reflect(only=[sql])
  523. return pandas_sql.read_table(
  524. sql,
  525. index_col=index_col,
  526. coerce_float=coerce_float,
  527. parse_dates=parse_dates,
  528. columns=columns,
  529. chunksize=chunksize,
  530. )
  531. else:
  532. return pandas_sql.read_query(
  533. sql,
  534. index_col=index_col,
  535. params=params,
  536. coerce_float=coerce_float,
  537. parse_dates=parse_dates,
  538. chunksize=chunksize,
  539. )
  540. def to_sql(
  541. frame,
  542. name: str,
  543. con,
  544. schema: str | None = None,
  545. if_exists: str = "fail",
  546. index: bool = True,
  547. index_label=None,
  548. chunksize: int | None = None,
  549. dtype: DtypeArg | None = None,
  550. method: str | None = None,
  551. engine: str = "auto",
  552. **engine_kwargs,
  553. ) -> None:
  554. """
  555. Write records stored in a DataFrame to a SQL database.
  556. Parameters
  557. ----------
  558. frame : DataFrame, Series
  559. name : str
  560. Name of SQL table.
  561. con : SQLAlchemy connectable(engine/connection) or database string URI
  562. or sqlite3 DBAPI2 connection
  563. Using SQLAlchemy makes it possible to use any DB supported by that
  564. library.
  565. If a DBAPI2 object, only sqlite3 is supported.
  566. schema : str, optional
  567. Name of SQL schema in database to write to (if database flavor
  568. supports this). If None, use default schema (default).
  569. if_exists : {'fail', 'replace', 'append'}, default 'fail'
  570. - fail: If table exists, do nothing.
  571. - replace: If table exists, drop it, recreate it, and insert data.
  572. - append: If table exists, insert data. Create if does not exist.
  573. index : bool, default True
  574. Write DataFrame index as a column.
  575. index_label : str or sequence, optional
  576. Column label for index column(s). If None is given (default) and
  577. `index` is True, then the index names are used.
  578. A sequence should be given if the DataFrame uses MultiIndex.
  579. chunksize : int, optional
  580. Specify the number of rows in each batch to be written at a time.
  581. By default, all rows will be written at once.
  582. dtype : dict or scalar, optional
  583. Specifying the datatype for columns. If a dictionary is used, the
  584. keys should be the column names and the values should be the
  585. SQLAlchemy types or strings for the sqlite3 fallback mode. If a
  586. scalar is provided, it will be applied to all columns.
  587. method : {None, 'multi', callable}, optional
  588. Controls the SQL insertion clause used:
  589. - None : Uses standard SQL ``INSERT`` clause (one per row).
  590. - 'multi': Pass multiple values in a single ``INSERT`` clause.
  591. - callable with signature ``(pd_table, conn, keys, data_iter)``.
  592. Details and a sample callable implementation can be found in the
  593. section :ref:`insert method <io.sql.method>`.
  594. engine : {'auto', 'sqlalchemy'}, default 'auto'
  595. SQL engine library to use. If 'auto', then the option
  596. ``io.sql.engine`` is used. The default ``io.sql.engine``
  597. behavior is 'sqlalchemy'
  598. .. versionadded:: 1.3.0
  599. **engine_kwargs
  600. Any additional kwargs are passed to the engine.
  601. """
  602. if if_exists not in ("fail", "replace", "append"):
  603. raise ValueError(f"'{if_exists}' is not valid for if_exists")
  604. pandas_sql = pandasSQL_builder(con, schema=schema)
  605. if isinstance(frame, Series):
  606. frame = frame.to_frame()
  607. elif not isinstance(frame, DataFrame):
  608. raise NotImplementedError(
  609. "'frame' argument should be either a Series or a DataFrame"
  610. )
  611. pandas_sql.to_sql(
  612. frame,
  613. name,
  614. if_exists=if_exists,
  615. index=index,
  616. index_label=index_label,
  617. schema=schema,
  618. chunksize=chunksize,
  619. dtype=dtype,
  620. method=method,
  621. engine=engine,
  622. **engine_kwargs,
  623. )
  624. def has_table(table_name: str, con, schema: str | None = None):
  625. """
  626. Check if DataBase has named table.
  627. Parameters
  628. ----------
  629. table_name: string
  630. Name of SQL table.
  631. con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
  632. Using SQLAlchemy makes it possible to use any DB supported by that
  633. library.
  634. If a DBAPI2 object, only sqlite3 is supported.
  635. schema : string, default None
  636. Name of SQL schema in database to write to (if database flavor supports
  637. this). If None, use default schema (default).
  638. Returns
  639. -------
  640. boolean
  641. """
  642. pandas_sql = pandasSQL_builder(con, schema=schema)
  643. return pandas_sql.has_table(table_name)
  644. table_exists = has_table
  645. def _engine_builder(con):
  646. """
  647. Returns a SQLAlchemy engine from a URI (if con is a string)
  648. else it just return con without modifying it.
  649. """
  650. global _SQLALCHEMY_INSTALLED
  651. if isinstance(con, str):
  652. try:
  653. import sqlalchemy
  654. except ImportError:
  655. _SQLALCHEMY_INSTALLED = False
  656. else:
  657. con = sqlalchemy.create_engine(con)
  658. return con
  659. return con
  660. def pandasSQL_builder(
  661. con, schema: str | None = None, meta=None, is_cursor: bool = False
  662. ):
  663. """
  664. Convenience function to return the correct PandasSQL subclass based on the
  665. provided parameters.
  666. """
  667. # When support for DBAPI connections is removed,
  668. # is_cursor should not be necessary.
  669. con = _engine_builder(con)
  670. if _is_sqlalchemy_connectable(con):
  671. return SQLDatabase(con, schema=schema, meta=meta)
  672. elif isinstance(con, str):
  673. raise ImportError("Using URI string without sqlalchemy installed.")
  674. else:
  675. return SQLiteDatabase(con, is_cursor=is_cursor)
class SQLTable(PandasObject):
    """
    For mapping Pandas tables to SQL tables.

    Uses fact that table is reflected by SQLAlchemy to
    do better type conversions.
    Also holds various flags needed to avoid having to
    pass them between functions all the time.
    """

    # TODO: support for multiIndex

    def __init__(
        self,
        name: str,
        pandas_sql_engine,
        frame=None,
        index=True,
        if_exists="fail",
        prefix="pandas",
        index_label=None,
        schema=None,
        keys=None,
        dtype: DtypeArg | None = None,
    ):
        self.name = name
        # the owning PandasSQL instance through which all SQL is executed
        self.pd_sql = pandas_sql_engine
        self.prefix = prefix
        self.frame = frame
        # resolved list of index column labels, or None when the index is
        # not written/used
        self.index = self._index_name(index, index_label)
        self.schema = schema
        self.if_exists = if_exists
        self.keys = keys
        self.dtype = dtype

        if frame is not None:
            # We want to initialize based on a dataframe
            self.table = self._create_table_setup()
        else:
            # no data provided, read-only mode
            self.table = self.pd_sql.get_table(self.name, self.schema)

        if self.table is None:
            raise ValueError(f"Could not init table '{name}'")

    def exists(self):
        """Return True if this table already exists in the database."""
        return self.pd_sql.has_table(self.name, self.schema)

    def sql_schema(self):
        """Return the CREATE TABLE statement for this table as a string."""
        from sqlalchemy.schema import CreateTable

        return str(CreateTable(self.table).compile(self.pd_sql.connectable))

    def _execute_create(self):
        # Inserting table into database, add to MetaData object
        if _gt14():
            # SQLAlchemy >= 1.4 renamed Table.tometadata to to_metadata
            self.table = self.table.to_metadata(self.pd_sql.meta)
        else:
            self.table = self.table.tometadata(self.pd_sql.meta)
        self.table.create()

    def create(self):
        """Create the table in the database, honoring ``if_exists``."""
        if self.exists():
            if self.if_exists == "fail":
                raise ValueError(f"Table '{self.name}' already exists.")
            elif self.if_exists == "replace":
                self.pd_sql.drop_table(self.name, self.schema)
                self._execute_create()
            elif self.if_exists == "append":
                # keep the existing table; rows are appended by insert()
                pass
            else:
                raise ValueError(f"'{self.if_exists}' is not valid for if_exists")
        else:
            self._execute_create()

    def _execute_insert(self, conn, keys: list[str], data_iter):
        """
        Execute SQL statement inserting data

        Parameters
        ----------
        conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
        keys : list of str
            Column names
        data_iter : generator of list
            Each item contains a list of values to be inserted
        """
        data = [dict(zip(keys, row)) for row in data_iter]
        conn.execute(self.table.insert(), data)

    def _execute_insert_multi(self, conn, keys: list[str], data_iter):
        """
        Alternative to _execute_insert for DBs support multivalue INSERT.

        Note: multi-value insert is usually faster for analytics DBs
        and tables containing a few columns
        but performance degrades quickly with increase of columns.
        """
        data = [dict(zip(keys, row)) for row in data_iter]
        conn.execute(self.table.insert(data))

    def insert_data(self):
        """
        Convert ``self.frame`` into ``(column_names, data_list)`` ready for
        insertion, materializing the index as leading columns when
        ``self.index`` is set. ``data_list`` holds one object-dtype ndarray
        per column, with missing values replaced by None.
        """
        if self.index is not None:
            temp = self.frame.copy()
            temp.index.names = self.index
            try:
                temp.reset_index(inplace=True)
            except ValueError as err:
                raise ValueError(f"duplicate name in index/columns: {err}") from err
        else:
            temp = self.frame

        column_names = list(map(str, temp.columns))
        ncols = len(column_names)
        data_list = [None] * ncols

        for i, (_, ser) in enumerate(temp.items()):
            vals = ser._values
            if vals.dtype.kind == "M":
                # datetime64 values -> python datetime objects
                d = vals.to_pydatetime()
            elif vals.dtype.kind == "m":
                # store as integers, see GH#6921, GH#7076
                d = vals.view("i8").astype(object)
            else:
                d = vals.astype(object)

            assert isinstance(d, np.ndarray), type(d)

            if ser._can_hold_na:
                # Note: this will miss timedeltas since they are converted to int
                mask = isna(d)
                d[mask] = None

            # error: No overload variant of "__setitem__" of "list" matches
            # argument types "int", "ndarray"
            data_list[i] = d  # type: ignore[call-overload]

        return column_names, data_list

    def insert(self, chunksize: int | None = None, method: str | None = None):
        """
        Insert the frame's rows, optionally in chunks of ``chunksize`` rows,
        using the given insertion ``method`` (None, 'multi', or a callable
        with signature ``(pd_table, conn, keys, data_iter)``).
        """
        # set insert method
        if method is None:
            exec_insert = self._execute_insert
        elif method == "multi":
            exec_insert = self._execute_insert_multi
        elif callable(method):
            exec_insert = partial(method, self)
        else:
            raise ValueError(f"Invalid parameter `method`: {method}")

        keys, data_list = self.insert_data()

        nrows = len(self.frame)

        if nrows == 0:
            return

        if chunksize is None:
            chunksize = nrows
        elif chunksize == 0:
            raise ValueError("chunksize argument should be non-zero")
        # NOTE(review): a negative chunksize passes the check above but the
        # loop below then writes nothing (start_i >= end_i immediately);
        # consider rejecting chunksize < 0 as well.

        chunks = (nrows // chunksize) + 1

        with self.pd_sql.run_transaction() as conn:
            for i in range(chunks):
                start_i = i * chunksize
                end_i = min((i + 1) * chunksize, nrows)
                if start_i >= end_i:
                    break

                # rows for this chunk, as tuples of per-column values
                chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
                exec_insert(conn, keys, chunk_iter)

    def _query_iterator(
        self,
        result,
        chunksize: int | None,
        columns,
        coerce_float: bool = True,
        parse_dates=None,
    ):
        """Return generator through chunked result set."""
        has_read_data = False
        while True:
            data = result.fetchmany(chunksize)
            if not data:
                if not has_read_data:
                    # empty result set: still yield one empty, well-formed frame
                    yield DataFrame.from_records(
                        [], columns=columns, coerce_float=coerce_float
                    )
                break
            else:
                has_read_data = True
                self.frame = DataFrame.from_records(
                    data, columns=columns, coerce_float=coerce_float
                )

                self._harmonize_columns(parse_dates=parse_dates)

                if self.index is not None:
                    self.frame.set_index(self.index, inplace=True)

                yield self.frame

    def read(self, coerce_float=True, parse_dates=None, columns=None, chunksize=None):
        """
        Read the table into a DataFrame (or an iterator of DataFrames when
        ``chunksize`` is given), optionally restricted to ``columns``.
        """
        if columns is not None and len(columns) > 0:
            from sqlalchemy import select

            cols = [self.table.c[n] for n in columns]
            if self.index is not None:
                # prepend the index columns (preserving their order) so they
                # survive the column selection
                for idx in self.index[::-1]:
                    cols.insert(0, self.table.c[idx])
            sql_select = select(cols)
        else:
            sql_select = self.table.select()

        result = self.pd_sql.execute(sql_select)
        column_names = result.keys()

        if chunksize is not None:
            return self._query_iterator(
                result,
                chunksize,
                column_names,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
            )
        else:
            data = result.fetchall()
            self.frame = DataFrame.from_records(
                data, columns=column_names, coerce_float=coerce_float
            )

            self._harmonize_columns(parse_dates=parse_dates)

            if self.index is not None:
                self.frame.set_index(self.index, inplace=True)

            return self.frame

    def _index_name(self, index, index_label):
        """Resolve the ``index``/``index_label`` arguments to a list of index
        column labels, or None when the index is not used."""
        # for writing: index=True to include index in sql table
        if index is True:
            nlevels = self.frame.index.nlevels
            # if index_label is specified, set this as index name(s)
            if index_label is not None:
                if not isinstance(index_label, list):
                    index_label = [index_label]
                if len(index_label) != nlevels:
                    raise ValueError(
                        "Length of 'index_label' should match number of "
                        f"levels, which is {nlevels}"
                    )
                else:
                    return index_label
            # return the used column labels for the index columns
            if (
                nlevels == 1
                and "index" not in self.frame.columns
                and self.frame.index.name is None
            ):
                return ["index"]
            else:
                # unnamed levels get positional "level_<i>" labels
                return [
                    l if l is not None else f"level_{i}"
                    for i, l in enumerate(self.frame.index.names)
                ]

        # for reading: index=(list of) string to specify column to set as index
        elif isinstance(index, str):
            return [index]
        elif isinstance(index, list):
            return index
        else:
            return None

    def _get_column_names_and_types(self, dtype_mapper):
        """Return ``[(name, mapped_type, is_index), ...]`` for index levels
        (first) and frame columns, applying ``dtype_mapper`` to each."""
        column_names_and_types = []
        if self.index is not None:
            for i, idx_label in enumerate(self.index):
                idx_type = dtype_mapper(self.frame.index._get_level_values(i))
                column_names_and_types.append((str(idx_label), idx_type, True))

        column_names_and_types += [
            (str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False)
            for i in range(len(self.frame.columns))
        ]

        return column_names_and_types

    def _create_table_setup(self):
        """Build (but do not yet create) the SQLAlchemy Table describing
        this frame, including an optional primary-key constraint."""
        from sqlalchemy import (
            Column,
            PrimaryKeyConstraint,
            Table,
        )

        column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type)

        columns = [
            Column(name, typ, index=is_index)
            for name, typ, is_index in column_names_and_types
        ]

        if self.keys is not None:
            if not is_list_like(self.keys):
                keys = [self.keys]
            else:
                keys = self.keys
            pkc = PrimaryKeyConstraint(*keys, name=self.name + "_pk")
            columns.append(pkc)

        schema = self.schema or self.pd_sql.meta.schema

        # At this point, attach to new metadata, only attach to self.meta
        # once table is created.
        from sqlalchemy.schema import MetaData

        meta = MetaData(self.pd_sql, schema=schema)

        return Table(self.name, meta, *columns, schema=schema)

    def _harmonize_columns(self, parse_dates=None):
        """
        Make the DataFrame's column types align with the SQL table
        column types.

        Need to work around limited NA value support. Floats are always
        fine, ints must always be floats if there are Null values.
        Booleans are hard because converting bool column with None replaces
        all Nones with false. Therefore only convert bool if there are no
        NA values.
        Datetimes should already be converted to np.datetime64 if supported,
        but here we also force conversion if required.
        """
        parse_dates = _process_parse_dates_argument(parse_dates)

        for sql_col in self.table.columns:
            col_name = sql_col.name
            try:
                df_col = self.frame[col_name]

                # Handle date parsing upfront; don't try to convert columns
                # twice
                if col_name in parse_dates:
                    try:
                        fmt = parse_dates[col_name]
                    except TypeError:
                        # parse_dates is a list, not a dict -> no format
                        fmt = None
                    self.frame[col_name] = _handle_date_column(df_col, format=fmt)
                    continue

                # the type the dataframe column should have
                col_type = self._get_dtype(sql_col.type)

                if (
                    col_type is datetime
                    or col_type is date
                    or col_type is DatetimeTZDtype
                ):
                    # Convert tz-aware Datetime SQL columns to UTC
                    utc = col_type is DatetimeTZDtype
                    self.frame[col_name] = _handle_date_column(df_col, utc=utc)
                elif col_type is float:
                    # floats support NA, can always convert!
                    self.frame[col_name] = df_col.astype(col_type, copy=False)

                elif len(df_col) == df_col.count():
                    # No NA values, can convert ints and bools
                    if col_type is np.dtype("int64") or col_type is bool:
                        self.frame[col_name] = df_col.astype(col_type, copy=False)
            except KeyError:
                pass  # this column not in results

    def _sqlalchemy_type(self, col):
        """Map a pandas column/index to a SQLAlchemy column type, honoring
        any user-supplied ``dtype`` override for this column."""
        dtype: DtypeArg = self.dtype or {}
        if is_dict_like(dtype):
            dtype = cast(dict, dtype)
            if col.name in dtype:
                return dtype[col.name]

        # Infer type of column, while ignoring missing values.
        # Needed for inserting typed data containing NULLs, GH 8778.
        col_type = lib.infer_dtype(col, skipna=True)

        from sqlalchemy.types import (
            TIMESTAMP,
            BigInteger,
            Boolean,
            Date,
            DateTime,
            Float,
            Integer,
            SmallInteger,
            Text,
            Time,
        )

        if col_type == "datetime64" or col_type == "datetime":
            # GH 9086: TIMESTAMP is the suggested type if the column contains
            # timezone information
            try:
                if col.dt.tz is not None:
                    return TIMESTAMP(timezone=True)
            except AttributeError:
                # The column is actually a DatetimeIndex
                # GH 26761 or an Index with date-like data e.g. 9999-01-01
                if getattr(col, "tz", None) is not None:
                    return TIMESTAMP(timezone=True)
            return DateTime
        if col_type == "timedelta64":
            warnings.warn(
                "the 'timedelta' type is not supported, and will be "
                "written as integer values (ns frequency) to the database.",
                UserWarning,
                stacklevel=8,
            )
            return BigInteger
        elif col_type == "floating":
            if col.dtype == "float32":
                return Float(precision=23)
            else:
                return Float(precision=53)
        elif col_type == "integer":
            # GH35076 Map pandas integer to optimal SQLAlchemy integer type
            if col.dtype.name.lower() in ("int8", "uint8", "int16"):
                return SmallInteger
            elif col.dtype.name.lower() in ("uint16", "int32"):
                return Integer
            elif col.dtype.name.lower() == "uint64":
                raise ValueError("Unsigned 64 bit integer datatype is not supported")
            else:
                return BigInteger
        elif col_type == "boolean":
            return Boolean
        elif col_type == "date":
            return Date
        elif col_type == "time":
            return Time
        elif col_type == "complex":
            raise ValueError("Complex datatypes not supported")

        # fallback for strings / mixed / anything unrecognized
        return Text

    def _get_dtype(self, sqltype):
        """Map a SQLAlchemy column type to the Python/numpy type the
        DataFrame column should be converted to."""
        from sqlalchemy.types import (
            TIMESTAMP,
            Boolean,
            Date,
            DateTime,
            Float,
            Integer,
        )

        if isinstance(sqltype, Float):
            return float
        elif isinstance(sqltype, Integer):
            # TODO: Refine integer size.
            return np.dtype("int64")
        elif isinstance(sqltype, TIMESTAMP):
            # we have a timezone capable type
            if not sqltype.timezone:
                return datetime
            return DatetimeTZDtype
        elif isinstance(sqltype, DateTime):
            # Caution: np.datetime64 is also a subclass of np.number.
            return datetime
        elif isinstance(sqltype, Date):
            return date
        elif isinstance(sqltype, Boolean):
            return bool
        return object
class PandasSQL(PandasObject):
    """
    Abstract base for SQL backends.

    Subclasses Should define read_sql and to_sql.
    """

    def read_sql(self, *args, **kwargs):
        # Abstract; overridden by the concrete backends returned from
        # pandasSQL_builder (SQLDatabase / SQLiteDatabase).
        raise ValueError(
            "PandasSQL must be created with an SQLAlchemy "
            "connectable or sqlite connection"
        )

    def to_sql(
        self,
        frame,
        name,
        if_exists="fail",
        index=True,
        index_label=None,
        schema=None,
        chunksize=None,
        dtype: DtypeArg | None = None,
        method=None,
    ):
        # Abstract; overridden by the concrete backends returned from
        # pandasSQL_builder (SQLDatabase / SQLiteDatabase).
        raise ValueError(
            "PandasSQL must be created with an SQLAlchemy "
            "connectable or sqlite connection"
        )
class BaseEngine:
    """Abstract interface for SQL insertion engines (see ``get_engine``)."""

    def insert_records(
        self,
        table: SQLTable,
        con,
        frame,
        name,
        index=True,
        schema=None,
        chunksize=None,
        method=None,
        **engine_kwargs,
    ):
        """
        Inserts data into already-prepared table

        Parameters
        ----------
        table : SQLTable
            Prepared table wrapper (already created in the database).
        con : connectable
        frame : DataFrame
        name : str
            Table name.
        """
        raise AbstractMethodError(self)
class SQLAlchemyEngine(BaseEngine):
    """Default insertion engine, backed by SQLAlchemy."""

    def __init__(self):
        # Fails fast with a helpful message if sqlalchemy is missing.
        import_optional_dependency(
            "sqlalchemy", extra="sqlalchemy is required for SQL support."
        )

    def insert_records(
        self,
        table: SQLTable,
        con,
        frame,
        name,
        index=True,
        schema=None,
        chunksize=None,
        method=None,
        **engine_kwargs,
    ):
        """Insert the frame's rows via ``table.insert``, translating MySQL's
        cryptic failure on ``inf`` values into a clear ValueError."""
        from sqlalchemy import exc

        try:
            table.insert(chunksize=chunksize, method=method)
        except exc.SQLAlchemyError as err:
            # GH34431
            # https://stackoverflow.com/a/67358288/6067848
            msg = r"""(\(1054, "Unknown column 'inf(e0)?' in 'field list'"\))(?#
            )|inf can not be used with MySQL"""
            err_text = str(err.orig)
            if re.search(msg, err_text):
                raise ValueError("inf cannot be used with MySQL") from err
            else:
                # not the inf case -- propagate the original error untouched
                raise err
  1154. def get_engine(engine: str) -> BaseEngine:
  1155. """return our implementation"""
  1156. if engine == "auto":
  1157. engine = get_option("io.sql.engine")
  1158. if engine == "auto":
  1159. # try engines in this order
  1160. engine_classes = [SQLAlchemyEngine]
  1161. error_msgs = ""
  1162. for engine_class in engine_classes:
  1163. try:
  1164. return engine_class()
  1165. except ImportError as err:
  1166. error_msgs += "\n - " + str(err)
  1167. raise ImportError(
  1168. "Unable to find a usable engine; "
  1169. "tried using: 'sqlalchemy'.\n"
  1170. "A suitable version of "
  1171. "sqlalchemy is required for sql I/O "
  1172. "support.\n"
  1173. "Trying to import the above resulted in these errors:"
  1174. f"{error_msgs}"
  1175. )
  1176. elif engine == "sqlalchemy":
  1177. return SQLAlchemyEngine()
  1178. raise ValueError("engine must be one of 'auto', 'sqlalchemy'")
  1179. class SQLDatabase(PandasSQL):
  1180. """
  1181. This class enables conversion between DataFrame and SQL databases
  1182. using SQLAlchemy to handle DataBase abstraction.
  1183. Parameters
  1184. ----------
  1185. engine : SQLAlchemy connectable
  1186. Connectable to connect with the database. Using SQLAlchemy makes it
  1187. possible to use any DB supported by that library.
  1188. schema : string, default None
  1189. Name of SQL schema in database to write to (if database flavor
  1190. supports this). If None, use default schema (default).
  1191. meta : SQLAlchemy MetaData object, default None
  1192. If provided, this MetaData object is used instead of a newly
  1193. created. This allows to specify database flavor specific
  1194. arguments in the MetaData object.
  1195. """
    def __init__(self, engine, schema: str | None = None, meta=None):
        # The raw SQLAlchemy connectable; all execution goes through it.
        self.connectable = engine
        if not meta:
            from sqlalchemy.schema import MetaData

            # Fresh MetaData tied to this connectable/schema when the
            # caller did not supply one.
            meta = MetaData(self.connectable, schema=schema)

        self.meta = meta
    @contextmanager
    def run_transaction(self):
        """Context manager yielding an object inserts can be executed on,
        inside a transaction on the connectable."""
        with self.connectable.begin() as tx:
            # Some objects returned by begin() cannot execute statements
            # themselves; fall back to the connectable in that case.
            if hasattr(tx, "execute"):
                yield tx
            else:
                yield self.connectable
  1209. def execute(self, *args, **kwargs):
  1210. """Simple passthrough to SQLAlchemy connectable"""
  1211. return self.connectable.execution_options().execute(*args, **kwargs)
    def read_table(
        self,
        table_name: str,
        index_col: str | Sequence[str] | None = None,
        coerce_float: bool = True,
        parse_dates=None,
        columns=None,
        schema: str | None = None,
        chunksize: int | None = None,
    ):
        """
        Read SQL database table into a DataFrame.

        Parameters
        ----------
        table_name : str
            Name of SQL table in database.
        index_col : string, optional, default: None
            Column to set as index.
        coerce_float : bool, default True
            Attempts to convert values of non-string, non-numeric objects
            (like decimal.Decimal) to floating point. This can result in
            loss of precision.
        parse_dates : list or dict, default: None
            - List of column names to parse as dates.
            - Dict of ``{column_name: format string}`` where format string is
              strftime compatible in case of parsing string times, or is one of
              (D, s, ns, ms, us) in case of parsing integer timestamps.
            - Dict of ``{column_name: arg}``, where the arg corresponds
              to the keyword arguments of :func:`pandas.to_datetime`.
              Especially useful with databases without native Datetime support,
              such as SQLite.
        columns : list, default: None
            List of column names to select from SQL table.
        schema : string, default None
            Name of SQL schema in database to query (if database flavor
            supports this). If specified, this overwrites the default
            schema of the SQL database object.
        chunksize : int, default None
            If specified, return an iterator where `chunksize` is the number
            of rows to include in each chunk.

        Returns
        -------
        DataFrame

        See Also
        --------
        pandas.read_sql_table
        SQLDatabase.read_query
        """
        # frame=None -> a read-only SQLTable reflected from the database
        table = SQLTable(table_name, self, index=index_col, schema=schema)
        return table.read(
            coerce_float=coerce_float,
            parse_dates=parse_dates,
            columns=columns,
            chunksize=chunksize,
        )
    @staticmethod
    def _query_iterator(
        result,
        chunksize: int,
        columns,
        index_col=None,
        coerce_float=True,
        parse_dates=None,
        dtype: DtypeArg | None = None,
    ):
        """Return generator through chunked result set"""
        has_read_data = False
        while True:
            data = result.fetchmany(chunksize)
            if not data:
                if not has_read_data:
                    # empty result set: still yield one empty, well-formed frame
                    yield _wrap_result(
                        [],
                        columns,
                        index_col=index_col,
                        coerce_float=coerce_float,
                        parse_dates=parse_dates,
                    )
                break
            else:
                has_read_data = True
                yield _wrap_result(
                    data,
                    columns,
                    index_col=index_col,
                    coerce_float=coerce_float,
                    parse_dates=parse_dates,
                    dtype=dtype,
                )
    def read_query(
        self,
        sql: str,
        index_col: str | None = None,
        coerce_float: bool = True,
        parse_dates=None,
        params=None,
        chunksize: int | None = None,
        dtype: DtypeArg | None = None,
    ):
        """
        Read SQL query into a DataFrame.

        Parameters
        ----------
        sql : str
            SQL query to be executed.
        index_col : string, optional, default: None
            Column name to use as index for the returned DataFrame object.
        coerce_float : bool, default True
            Attempt to convert values of non-string, non-numeric objects (like
            decimal.Decimal) to floating point, useful for SQL result sets.
        params : list, tuple or dict, optional, default: None
            List of parameters to pass to execute method. The syntax used
            to pass parameters is database driver dependent. Check your
            database driver documentation for which of the five syntax styles,
            described in PEP 249's paramstyle, is supported.
            Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
        parse_dates : list or dict, default: None
            - List of column names to parse as dates.
            - Dict of ``{column_name: format string}`` where format string is
              strftime compatible in case of parsing string times, or is one of
              (D, s, ns, ms, us) in case of parsing integer timestamps.
            - Dict of ``{column_name: arg dict}``, where the arg dict
              corresponds to the keyword arguments of
              :func:`pandas.to_datetime` Especially useful with databases
              without native Datetime support, such as SQLite.
        chunksize : int, default None
            If specified, return an iterator where `chunksize` is the number
            of rows to include in each chunk.
        dtype : Type name or dict of columns
            Data type for data or columns. E.g. np.float64 or
            {‘a’: np.float64, ‘b’: np.int32, ‘c’: ‘Int64’}

            .. versionadded:: 1.3.0

        Returns
        -------
        DataFrame

        See Also
        --------
        read_sql_table : Read SQL database table into a DataFrame.
        read_sql
        """
        args = _convert_params(sql, params)

        result = self.execute(*args)
        columns = result.keys()

        if chunksize is not None:
            # lazy: hand back a generator over fetchmany() chunks
            return self._query_iterator(
                result,
                chunksize,
                columns,
                index_col=index_col,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
                dtype=dtype,
            )
        else:
            data = result.fetchall()
            frame = _wrap_result(
                data,
                columns,
                index_col=index_col,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
                dtype=dtype,
            )
            return frame

    # public alias kept for backwards compatibility
    read_sql = read_query
    def prep_table(
        self,
        frame,
        name,
        if_exists="fail",
        index=True,
        index_label=None,
        schema=None,
        dtype: DtypeArg | None = None,
    ) -> SQLTable:
        """
        Prepares table in the database for data insertion. Creates it if needed, etc.
        """
        if dtype:
            if not is_dict_like(dtype):
                # a scalar dtype applies to every column
                # error: Value expression in dictionary comprehension has incompatible
                # type "Union[ExtensionDtype, str, dtype[Any], Type[object],
                # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
                # Type[str], Type[float], Type[int], Type[complex], Type[bool],
                # Type[object]]]]"; expected type "Union[ExtensionDtype, str,
                # dtype[Any], Type[object]]"
                dtype = {col_name: dtype for col_name in frame}  # type: ignore[misc]
            else:
                dtype = cast(dict, dtype)

            from sqlalchemy.types import (
                TypeEngine,
                to_instance,
            )

            # validate every override is a real SQLAlchemy type before
            # touching the database
            for col, my_type in dtype.items():
                if not isinstance(to_instance(my_type), TypeEngine):
                    raise ValueError(f"The type of {col} is not a SQLAlchemy type")

        table = SQLTable(
            name,
            self,
            frame=frame,
            index=index,
            if_exists=if_exists,
            index_label=index_label,
            schema=schema,
            dtype=dtype,
        )
        table.create()
        return table
    def check_case_sensitive(
        self,
        name,
        schema,
    ):
        """
        Checks table name for issues with case-sensitivity.
        Method is called after data is inserted.
        """
        if not name.isdigit() and not name.islower():
            # check for potentially case sensitivity issues (GH7815)
            # Only check when name is not a number and name is not lower case
            engine = self.connectable.engine
            with self.connectable.connect() as conn:
                if _gt14():
                    # SQLAlchemy >= 1.4: engine.table_names was removed,
                    # use the Inspector API instead
                    from sqlalchemy import inspect

                    insp = inspect(conn)
                    table_names = insp.get_table_names(
                        schema=schema or self.meta.schema
                    )
                else:
                    table_names = engine.table_names(
                        schema=schema or self.meta.schema, connection=conn
                    )
            if name not in table_names:
                msg = (
                    f"The provided table name '{name}' is not found exactly as "
                    "such in the database after writing the table, possibly "
                    "due to case sensitivity issues. Consider using lower "
                    "case table names."
                )
                warnings.warn(msg, UserWarning)
    def to_sql(
        self,
        frame,
        name,
        if_exists="fail",
        index=True,
        index_label=None,
        schema=None,
        chunksize=None,
        dtype: DtypeArg | None = None,
        method=None,
        engine="auto",
        **engine_kwargs,
    ):
        """
        Write records stored in a DataFrame to a SQL database.

        Parameters
        ----------
        frame : DataFrame
        name : string
            Name of SQL table.
        if_exists : {'fail', 'replace', 'append'}, default 'fail'
            - fail: If table exists, do nothing.
            - replace: If table exists, drop it, recreate it, and insert data.
            - append: If table exists, insert data. Create if does not exist.
        index : boolean, default True
            Write DataFrame index as a column.
        index_label : string or sequence, default None
            Column label for index column(s). If None is given (default) and
            `index` is True, then the index names are used.
            A sequence should be given if the DataFrame uses MultiIndex.
        schema : string, default None
            Name of SQL schema in database to write to (if database flavor
            supports this). If specified, this overwrites the default
            schema of the SQLDatabase object.
        chunksize : int, default None
            If not None, then rows will be written in batches of this size at a
            time. If None, all rows will be written at once.
        dtype : single type or dict of column name to SQL type, default None
            Optional specifying the datatype for columns. The SQL type should
            be a SQLAlchemy type. If all columns are of the same type, one
            single value can be used.
        method : {None', 'multi', callable}, default None
            Controls the SQL insertion clause used:

            * None : Uses standard SQL ``INSERT`` clause (one per row).
            * 'multi': Pass multiple values in a single ``INSERT`` clause.
            * callable with signature ``(pd_table, conn, keys, data_iter)``.

            Details and a sample callable implementation can be found in the
            section :ref:`insert method <io.sql.method>`.
        engine : {'auto', 'sqlalchemy'}, default 'auto'
            SQL engine library to use. If 'auto', then the option
            ``io.sql.engine`` is used. The default ``io.sql.engine``
            behavior is 'sqlalchemy'

            .. versionadded:: 1.3.0

        **engine_kwargs
            Any additional kwargs are passed to the engine.
        """
        # 1. resolve the insertion engine
        sql_engine = get_engine(engine)

        # 2. create/validate the target table per if_exists
        table = self.prep_table(
            frame=frame,
            name=name,
            if_exists=if_exists,
            index=index,
            index_label=index_label,
            schema=schema,
            dtype=dtype,
        )

        # 3. insert the rows
        sql_engine.insert_records(
            table=table,
            con=self.connectable,
            frame=frame,
            name=name,
            index=index,
            schema=schema,
            chunksize=chunksize,
            method=method,
            **engine_kwargs,
        )

        # 4. warn if the written name does not round-trip (GH7815)
        self.check_case_sensitive(name=name, schema=schema)
  1531. @property
  1532. def tables(self):
  1533. return self.meta.tables
  1534. def has_table(self, name: str, schema: str | None = None):
  1535. if _gt14():
  1536. import sqlalchemy as sa
  1537. insp = sa.inspect(self.connectable)
  1538. return insp.has_table(name, schema or self.meta.schema)
  1539. else:
  1540. return self.connectable.run_callable(
  1541. self.connectable.dialect.has_table, name, schema or self.meta.schema
  1542. )
  1543. def get_table(self, table_name: str, schema: str | None = None):
  1544. schema = schema or self.meta.schema
  1545. if schema:
  1546. tbl = self.meta.tables.get(".".join([schema, table_name]))
  1547. else:
  1548. tbl = self.meta.tables.get(table_name)
  1549. # Avoid casting double-precision floats into decimals
  1550. from sqlalchemy import Numeric
  1551. for column in tbl.columns:
  1552. if isinstance(column.type, Numeric):
  1553. column.type.asdecimal = False
  1554. return tbl
  1555. def drop_table(self, table_name: str, schema: str | None = None):
  1556. schema = schema or self.meta.schema
  1557. if self.has_table(table_name, schema):
  1558. self.meta.reflect(only=[table_name], schema=schema)
  1559. self.get_table(table_name, schema).drop()
  1560. self.meta.clear()
  1561. def _create_sql_schema(
  1562. self,
  1563. frame: DataFrame,
  1564. table_name: str,
  1565. keys: list[str] | None = None,
  1566. dtype: DtypeArg | None = None,
  1567. schema: str | None = None,
  1568. ):
  1569. table = SQLTable(
  1570. table_name,
  1571. self,
  1572. frame=frame,
  1573. index=False,
  1574. keys=keys,
  1575. dtype=dtype,
  1576. schema=schema,
  1577. )
  1578. return str(table.sql_schema())
  1579. # ---- SQL without SQLAlchemy ---
  1580. # sqlite-specific sql strings and handler class
  1581. # dictionary used for readability purposes
  1582. _SQL_TYPES = {
  1583. "string": "TEXT",
  1584. "floating": "REAL",
  1585. "integer": "INTEGER",
  1586. "datetime": "TIMESTAMP",
  1587. "date": "DATE",
  1588. "time": "TIME",
  1589. "boolean": "INTEGER",
  1590. }
  1591. def _get_unicode_name(name):
  1592. try:
  1593. uname = str(name).encode("utf-8", "strict").decode("utf-8")
  1594. except UnicodeError as err:
  1595. raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err
  1596. return uname
  1597. def _get_valid_sqlite_name(name):
  1598. # See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
  1599. # -for-sqlite-table-column-names-in-python
  1600. # Ensure the string can be encoded as UTF-8.
  1601. # Ensure the string does not include any NUL characters.
  1602. # Replace all " with "".
  1603. # Wrap the entire thing in double quotes.
  1604. uname = _get_unicode_name(name)
  1605. if not len(uname):
  1606. raise ValueError("Empty table or column name specified")
  1607. nul_index = uname.find("\x00")
  1608. if nul_index >= 0:
  1609. raise ValueError("SQLite identifier cannot contain NULs")
  1610. return '"' + uname.replace('"', '""') + '"'
# Warning text emitted by SQLiteTable._create_table_setup when any column
# name contains whitespace; the names are kept verbatim (quoted), unlike
# pandas < 0.14 which replaced spaces with underscores.
_SAFE_NAMES_WARNING = (
    "The spaces in these column names will not be changed. "
    "In pandas versions < 0.14, spaces were converted to underscores."
)
class SQLiteTable(SQLTable):
    """
    Patch the SQLTable for fallback support.

    Instead of a table variable just use the Create Table statement —
    ``self.table`` here holds the DDL statements produced by
    ``_create_table_setup`` rather than a SQLAlchemy ``Table``.
    """

    def __init__(self, *args, **kwargs):
        # GH 8341
        # register an adapter callable for datetime.time object
        import sqlite3

        # this will transform time(12,34,56,789) into '12:34:56.000789'
        # (this is what sqlalchemy does)
        sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f"))
        super().__init__(*args, **kwargs)

    def sql_schema(self):
        """Return the table's DDL statements joined into one string."""
        return str(";\n".join(self.table))

    def _execute_create(self):
        # Run every CREATE TABLE / CREATE INDEX statement inside a single
        # transaction so a failure leaves no partial schema behind.
        with self.pd_sql.run_transaction() as conn:
            for stmt in self.table:
                conn.execute(stmt)

    def insert_statement(self, *, num_rows: int):
        """
        Build a parameterized ``INSERT`` covering ``num_rows`` rows.

        Uses ``?`` placeholders (sqlite3 paramstyle); index labels, if any,
        are prepended as leading columns.
        """
        names = list(map(str, self.frame.columns))
        wld = "?"  # wildcard char
        escape = _get_valid_sqlite_name

        if self.index is not None:
            # Insert in reverse so the final order matches self.index.
            for idx in self.index[::-1]:
                names.insert(0, idx)

        bracketed_names = [escape(column) for column in names]
        col_names = ",".join(bracketed_names)

        row_wildcards = ",".join([wld] * len(names))
        wildcards = ",".join(f"({row_wildcards})" for _ in range(num_rows))
        insert_statement = (
            f"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}"
        )
        return insert_statement

    def _execute_insert(self, conn, keys, data_iter):
        # Default path: a one-row statement executed once per row.
        data_list = list(data_iter)
        conn.executemany(self.insert_statement(num_rows=1), data_list)

    def _execute_insert_multi(self, conn, keys, data_iter):
        # 'multi' path: a single multi-row INSERT; the parameter list must be
        # flattened row-major to match the generated placeholder groups.
        data_list = list(data_iter)
        flattened_data = [x for row in data_list for x in row]
        conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data)

    def _create_table_setup(self):
        """
        Return a list of SQL statements that creates a table reflecting the
        structure of a DataFrame.  The first entry will be a CREATE TABLE
        statement while the rest will be CREATE INDEX statements.
        """
        column_names_and_types = self._get_column_names_and_types(self._sql_type_name)

        pat = re.compile(r"\s+")
        column_names = [col_name for col_name, _, _ in column_names_and_types]
        if any(map(pat.search, column_names)):
            warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)

        escape = _get_valid_sqlite_name

        create_tbl_stmts = [
            escape(cname) + " " + ctype for cname, ctype, _ in column_names_and_types
        ]

        if self.keys is not None and len(self.keys):
            if not is_list_like(self.keys):
                keys = [self.keys]
            else:
                keys = self.keys
            cnames_br = ", ".join(escape(c) for c in keys)
            create_tbl_stmts.append(
                f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})"
            )
        if self.schema:
            schema_name = self.schema + "."
        else:
            schema_name = ""
        create_stmts = [
            "CREATE TABLE "
            + schema_name
            + escape(self.name)
            + " (\n"
            + ",\n ".join(create_tbl_stmts)
            + "\n)"
        ]

        ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index]
        if len(ix_cols):
            cnames = "_".join(ix_cols)
            cnames_br = ",".join(escape(c) for c in ix_cols)
            create_stmts.append(
                "CREATE INDEX "
                + escape("ix_" + self.name + "_" + cnames)
                # NOTE(review): no space before "ON" — the closing quote of the
                # escaped index name still separates the tokens, so SQLite
                # appears to accept this; confirm before "fixing".
                + "ON "
                + escape(self.name)
                + " ("
                + cnames_br
                + ")"
            )

        return create_stmts

    def _sql_type_name(self, col):
        """Map a column to its SQLite type string, honoring dtype overrides."""
        dtype: DtypeArg = self.dtype or {}
        if is_dict_like(dtype):
            dtype = cast(dict, dtype)
            if col.name in dtype:
                return dtype[col.name]

        # Infer type of column, while ignoring missing values.
        # Needed for inserting typed data containing NULLs, GH 8778.
        col_type = lib.infer_dtype(col, skipna=True)

        if col_type == "timedelta64":
            warnings.warn(
                "the 'timedelta' type is not supported, and will be "
                "written as integer values (ns frequency) to the database.",
                UserWarning,
                stacklevel=8,
            )
            col_type = "integer"

        elif col_type == "datetime64":
            col_type = "datetime"

        elif col_type == "empty":
            col_type = "string"

        elif col_type == "complex":
            raise ValueError("Complex datatypes not supported")

        # Anything unrecognized falls back to TEXT.
        if col_type not in _SQL_TYPES:
            col_type = "string"

        return _SQL_TYPES[col_type]
class SQLiteDatabase(PandasSQL):
    """
    Version of SQLDatabase to support SQLite connections (fallback without
    SQLAlchemy). This should only be used internally.

    Parameters
    ----------
    con : sqlite connection object
    """

    def __init__(self, con, is_cursor: bool = False):
        # is_cursor=True means ``con`` is already a DBAPI cursor rather than
        # a connection; ``execute`` then uses it directly.
        self.is_cursor = is_cursor
        self.con = con

    @contextmanager
    def run_transaction(self):
        """Yield a cursor; commit on success, roll back on any exception."""
        cur = self.con.cursor()
        try:
            yield cur
            self.con.commit()
        except Exception:
            self.con.rollback()
            raise
        finally:
            cur.close()

    def execute(self, *args, **kwargs):
        """
        Execute a SQL statement and return the cursor used.

        On failure the connection is rolled back and a ``DatabaseError`` is
        raised, chained to the original exception (or to the rollback
        failure if the rollback itself also fails).
        """
        if self.is_cursor:
            cur = self.con
        else:
            cur = self.con.cursor()
        try:
            cur.execute(*args, **kwargs)
            return cur
        except Exception as exc:
            try:
                self.con.rollback()
            except Exception as inner_exc:  # pragma: no cover
                # Rollback failed too; surface both failures in the message.
                ex = DatabaseError(
                    f"Execution failed on sql: {args[0]}\n{exc}\nunable to rollback"
                )
                raise ex from inner_exc

            ex = DatabaseError(f"Execution failed on sql '{args[0]}': {exc}")
            raise ex from exc

    @staticmethod
    def _query_iterator(
        cursor,
        chunksize: int,
        columns,
        index_col=None,
        coerce_float: bool = True,
        parse_dates=None,
        dtype: DtypeArg | None = None,
    ):
        """Return generator through chunked result set"""
        has_read_data = False
        while True:
            data = cursor.fetchmany(chunksize)
            # Some drivers return a tuple of rows; normalize to a list so the
            # emptiness check below behaves consistently.
            if type(data) == tuple:
                data = list(data)
            if not data:
                cursor.close()
                if not has_read_data:
                    # Empty result set: still yield one empty DataFrame with
                    # the right column labels so callers get >= 1 chunk.
                    yield DataFrame.from_records(
                        [], columns=columns, coerce_float=coerce_float
                    )
                break
            else:
                has_read_data = True
                yield _wrap_result(
                    data,
                    columns,
                    index_col=index_col,
                    coerce_float=coerce_float,
                    parse_dates=parse_dates,
                    dtype=dtype,
                )

    def read_query(
        self,
        sql,
        index_col=None,
        coerce_float: bool = True,
        params=None,
        parse_dates=None,
        chunksize: int | None = None,
        dtype: DtypeArg | None = None,
    ):
        """
        Execute ``sql`` and return the result as a DataFrame, or as a
        generator of DataFrame chunks when ``chunksize`` is given.
        """
        args = _convert_params(sql, params)
        cursor = self.execute(*args)
        # Column labels come from the DBAPI cursor description.
        columns = [col_desc[0] for col_desc in cursor.description]

        if chunksize is not None:
            return self._query_iterator(
                cursor,
                chunksize,
                columns,
                index_col=index_col,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
                dtype=dtype,
            )
        else:
            data = self._fetchall_as_list(cursor)
            cursor.close()

            frame = _wrap_result(
                data,
                columns,
                index_col=index_col,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
                dtype=dtype,
            )
            return frame

    def _fetchall_as_list(self, cur):
        # fetchall may return a non-list sequence depending on the driver;
        # force a list for downstream code.
        result = cur.fetchall()
        if not isinstance(result, list):
            result = list(result)
        return result

    def to_sql(
        self,
        frame,
        name,
        if_exists="fail",
        index=True,
        index_label=None,
        schema=None,
        chunksize=None,
        dtype: DtypeArg | None = None,
        method=None,
        **kwargs,
    ):
        """
        Write records stored in a DataFrame to a SQL database.

        Parameters
        ----------
        frame : DataFrame
        name : string
            Name of SQL table.
        if_exists : {'fail', 'replace', 'append'}, default 'fail'
            fail: If table exists, do nothing.
            replace: If table exists, drop it, recreate it, and insert data.
            append: If table exists, insert data. Create if it does not exist.
        index : bool, default True
            Write DataFrame index as a column
        index_label : string or sequence, default None
            Column label for index column(s). If None is given (default) and
            `index` is True, then the index names are used.
            A sequence should be given if the DataFrame uses MultiIndex.
        schema : string, default None
            Ignored parameter included for compatibility with SQLAlchemy
            version of ``to_sql``.
        chunksize : int, default None
            If not None, then rows will be written in batches of this
            size at a time. If None, all rows will be written at once.
        dtype : single type or dict of column name to SQL type, default None
            Optional specifying the datatype for columns. The SQL type should
            be a string. If all columns are of the same type, one single value
            can be used.
        method : {None, 'multi', callable}, default None
            Controls the SQL insertion clause used:

            * None : Uses standard SQL ``INSERT`` clause (one per row).
            * 'multi': Pass multiple values in a single ``INSERT`` clause.
            * callable with signature ``(pd_table, conn, keys, data_iter)``.

            Details and a sample callable implementation can be found in the
            section :ref:`insert method <io.sql.method>`.
        """
        if dtype:
            if not is_dict_like(dtype):
                # A single type applies to every column.
                # error: Value expression in dictionary comprehension has incompatible
                # type "Union[ExtensionDtype, str, dtype[Any], Type[object],
                # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
                # Type[str], Type[float], Type[int], Type[complex], Type[bool],
                # Type[object]]]]"; expected type "Union[ExtensionDtype, str,
                # dtype[Any], Type[object]]"
                dtype = {col_name: dtype for col_name in frame}  # type: ignore[misc]
            else:
                dtype = cast(dict, dtype)

            # The sqlite fallback requires dtype overrides to be plain SQL
            # type strings (e.g. "TEXT"), not SQLAlchemy types.
            for col, my_type in dtype.items():
                if not isinstance(my_type, str):
                    raise ValueError(f"{col} ({my_type}) not a string")

        table = SQLiteTable(
            name,
            self,
            frame=frame,
            index=index,
            if_exists=if_exists,
            index_label=index_label,
            dtype=dtype,
        )
        table.create()
        table.insert(chunksize, method)

    def has_table(self, name: str, schema: str | None = None):
        # TODO(wesm): unused?
        # escape = _get_valid_sqlite_name
        # esc_name = escape(name)

        wld = "?"
        query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};"

        # Parameterized query avoids quoting/injection issues with ``name``.
        return len(self.execute(query, [name]).fetchall()) > 0

    def get_table(self, table_name: str, schema: str | None = None):
        return None  # not supported in fallback mode

    def drop_table(self, name: str, schema: str | None = None):
        # Identifier is escaped (DDL cannot be parameterized).
        drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}"
        self.execute(drop_sql)

    def _create_sql_schema(
        self,
        frame,
        table_name: str,
        keys=None,
        dtype: DtypeArg | None = None,
        schema: str | None = None,
    ):
        # Build (but do not execute) the CREATE TABLE statement(s) for frame.
        table = SQLiteTable(
            table_name,
            self,
            frame=frame,
            index=False,
            keys=keys,
            dtype=dtype,
            schema=schema,
        )
        return str(table.sql_schema())
  1948. def get_schema(
  1949. frame,
  1950. name: str,
  1951. keys=None,
  1952. con=None,
  1953. dtype: DtypeArg | None = None,
  1954. schema: str | None = None,
  1955. ):
  1956. """
  1957. Get the SQL db table schema for the given frame.
  1958. Parameters
  1959. ----------
  1960. frame : DataFrame
  1961. name : str
  1962. name of SQL table
  1963. keys : string or sequence, default: None
  1964. columns to use a primary key
  1965. con: an open SQL database connection object or a SQLAlchemy connectable
  1966. Using SQLAlchemy makes it possible to use any DB supported by that
  1967. library, default: None
  1968. If a DBAPI2 object, only sqlite3 is supported.
  1969. dtype : dict of column name to SQL type, default None
  1970. Optional specifying the datatype for columns. The SQL type should
  1971. be a SQLAlchemy type, or a string for sqlite3 fallback connection.
  1972. schema: str, default: None
  1973. Optional specifying the schema to be used in creating the table.
  1974. .. versionadded:: 1.2.0
  1975. """
  1976. pandas_sql = pandasSQL_builder(con=con)
  1977. return pandas_sql._create_sql_schema(
  1978. frame, name, keys=keys, dtype=dtype, schema=schema
  1979. )