pax_global_header 0000666 0000000 0000000 00000000064 14700603111 0014503 g ustar 00root root 0000000 0000000 52 comment=218cf8263727ce662483fbf26ab08bd9cf22cfad
next-jdbc-1.3.955/ 0000775 0000000 0000000 00000000000 14700603111 0013545 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/.clj-kondo/ 0000775 0000000 0000000 00000000000 14700603111 0015503 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/.clj-kondo/config.edn 0000664 0000000 0000000 00000000122 14700603111 0017433 0 ustar 00root root 0000000 0000000 {:config-paths ["resources/clj-kondo.exports/com.github.seancorfield/next.jdbc"]}
next-jdbc-1.3.955/.github/ 0000775 0000000 0000000 00000000000 14700603111 0015105 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/.github/FUNDING.yml 0000664 0000000 0000000 00000000025 14700603111 0016717 0 ustar 00root root 0000000 0000000 github: seancorfield
next-jdbc-1.3.955/.github/ISSUE_TEMPLATE/ 0000775 0000000 0000000 00000000000 14700603111 0017270 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/.github/ISSUE_TEMPLATE/bug_report.md 0000664 0000000 0000000 00000002005 14700603111 0021757 0 ustar 00root root 0000000 0000000 ---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior (preferably code -- or link to a GitHub repo containing a small, self-contained repro example):
1. ...
2. ...
3. ...
**Expected behavior**
A clear and concise description of what you expected to happen.
**Stacktraces**
If applicable, add stack traces to help explain your problem.
**project.clj/deps.edn**
If possible, please include your Leiningen or clj dependencies, at least for Clojure, next.jdbc, and the database drivers you are using.
**Environment (please complete the following information):**
- OS: [e.g. Linux]
- Java Version: [e.g. (AdoptOpenJDK)(build 1.8.0_192-b12)]
- Clojure Version: [e.g. 1.10.3]
- Database: [e.g., MySQL 5.7 (Percona)]
- Driver Library Version: [e.g., mysql/mysql-connector-java 5.1.41]
**Additional context**
Add any other context about the problem here.
next-jdbc-1.3.955/.github/ISSUE_TEMPLATE/feature_request.md 0000664 0000000 0000000 00000001123 14700603111 0023012 0 ustar 00root root 0000000 0000000 ---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
next-jdbc-1.3.955/.github/workflows/ 0000775 0000000 0000000 00000000000 14700603111 0017142 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/.github/workflows/test-and-release.yml 0000664 0000000 0000000 00000002655 14700603111 0023032 0 ustar 00root root 0000000 0000000 name: Release Version
on:
push:
tags:
- "v*"
jobs:
build-and-release:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-java@v4
with:
distribution: 'temurin'
java-version: '11'
- name: Setup Clojure
uses: DeLaGuardo/setup-clojure@master
with:
cli: '1.12.0.1479'
- name: Cache All The Things
uses: actions/cache@v4
with:
path: |
~/.m2/repository
~/.gitlibs
~/.clojure
~/.cpcache
key: ${{ runner.os }}-${{ hashFiles('**/deps.edn') }}
- name: Setup Databases
run: docker compose up -d
env:
MYSQL_ROOT_PASSWORD: testing
- name: Run MariaDB Tests
run: clojure -X:test
env:
MYSQL_ROOT_PASSWORD: testing
NEXT_JDBC_TEST_MYSQL: yes
NEXT_JDBC_TEST_MARIADB: yes
- name: Run All Tests and Release
run: clojure -T:build ci :snapshot false
env:
MYSQL_ROOT_PASSWORD: testing
NEXT_JDBC_TEST_MYSQL: yes
NEXT_JDBC_TEST_MSSQL: yes
MSSQL_SA_PASSWORD: Str0ngP4ssw0rd
- name: Deploy Release
run: clojure -T:build deploy :snapshot false
env:
CLOJARS_PASSWORD: ${{secrets.DEPLOY_TOKEN}}
CLOJARS_USERNAME: ${{secrets.DEPLOY_USERNAME}}
next-jdbc-1.3.955/.github/workflows/test-and-snapshot.yml 0000664 0000000 0000000 00000004127 14700603111 0023245 0 ustar 00root root 0000000 0000000 name: Develop & Snapshot
on:
push:
branches:
- "develop"
jobs:
build-and-snapshot:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-java@v4
with:
distribution: 'temurin'
java-version: '11'
- name: Setup Clojure
uses: DeLaGuardo/setup-clojure@master
with:
cli: '1.12.0.1479'
- name: Cache All The Things
uses: actions/cache@v4
with:
path: |
~/.m2/repository
~/.gitlibs
~/.clojure
~/.cpcache
key: ${{ runner.os }}-${{ hashFiles('**/deps.edn') }}
- name: Setup Databases
run: docker compose up -d
env:
MYSQL_ROOT_PASSWORD: testing
- name: Run MariaDB Tests
run: clojure -X:test
env:
MYSQL_ROOT_PASSWORD: testing
NEXT_JDBC_TEST_MYSQL: yes
NEXT_JDBC_TEST_MARIADB: yes
- name: Run All Tests and Snapshot
run: clojure -T:build ci :snapshot true
env:
MYSQL_ROOT_PASSWORD: testing
NEXT_JDBC_TEST_MYSQL: yes
NEXT_JDBC_TEST_MSSQL: yes
MSSQL_SA_PASSWORD: Str0ngP4ssw0rd
- name: Deploy Snapshot
run: clojure -T:build deploy :snapshot true
env:
CLOJARS_PASSWORD: ${{secrets.DEPLOY_TOKEN}}
CLOJARS_USERNAME: ${{secrets.DEPLOY_USERNAME}}
build:
runs-on: ubuntu-latest
strategy:
matrix:
java: [ '11', '17', '21' ]
steps:
- uses: actions/checkout@v4
- uses: actions/setup-java@v4
with:
distribution: 'temurin'
java-version: ${{ matrix.java }}
- name: Setup Clojure
uses: DeLaGuardo/setup-clojure@master
with:
cli: '1.12.0.1479'
- name: Cache All The Things
uses: actions/cache@v4
with:
path: |
~/.m2/repository
~/.gitlibs
~/.clojure
~/.cpcache
key: ${{ runner.os }}-${{ hashFiles('**/deps.edn') }}
- name: Run Tests
run: clojure -T:build test
next-jdbc-1.3.955/.github/workflows/test.yml 0000664 0000000 0000000 00000002306 14700603111 0020645 0 ustar 00root root 0000000 0000000 name: Pull Request
on: [pull_request]
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
java: [ '11', '17', '21' ]
steps:
- uses: actions/checkout@v4
- uses: actions/setup-java@v4
with:
distribution: 'temurin'
java-version: ${{ matrix.java }}
- name: Setup Clojure
uses: DeLaGuardo/setup-clojure@master
with:
cli: '1.12.0.1479'
- name: Cache All The Things
uses: actions/cache@v4
with:
path: |
~/.m2/repository
~/.gitlibs
~/.clojure
~/.cpcache
key: ${{ runner.os }}-${{ hashFiles('**/deps.edn') }}
- name: Setup Databases
run: docker compose up -d
env:
MYSQL_ROOT_PASSWORD: testing
- name: Run MariaDB Tests
run: clojure -X:test
env:
MYSQL_ROOT_PASSWORD: testing
NEXT_JDBC_TEST_MYSQL: yes
NEXT_JDBC_TEST_MARIADB: yes
- name: Run All Tests
run: clojure -X:test
env:
MYSQL_ROOT_PASSWORD: testing
NEXT_JDBC_TEST_MYSQL: yes
NEXT_JDBC_TEST_MSSQL: yes
MSSQL_SA_PASSWORD: Str0ngP4ssw0rd
next-jdbc-1.3.955/.gitignore 0000664 0000000 0000000 00000000612 14700603111 0015534 0 ustar 00root root 0000000 0000000 *.class
*.jar
*.swp
*~
.calva/output-window/
.calva/repl.calva-repl
.classpath
.clj-kondo/.cache
.clj-kondo/com.github.seancorfield/next.jdbc
.cpcache
.eastwood
.factorypath
.hg/
.hgignore
.java-version
.lein-*
.lsp/.cache
.lsp/sqlite.db
.nrepl-history
.nrepl-port
.portal
.project
.rebel_readline_history
.settings
.socket-repl-port
.sw*
/checkouts
/classes
/clojure_test_*
/derby.log
/target
next-jdbc-1.3.955/.gitpod.dockerfile 0000664 0000000 0000000 00000000117 14700603111 0017141 0 ustar 00root root 0000000 0000000 FROM gitpod/workspace-full
RUN brew install clojure/tools/clojure@1.12.0.1479
next-jdbc-1.3.955/.gitpod.yml 0000664 0000000 0000000 00000001034 14700603111 0015632 0 ustar 00root root 0000000 0000000 image:
file: .gitpod.dockerfile
vscode:
extensions:
- betterthantomorrow.calva
- mauricioszabo.clover
tasks:
- name: Prepare deps/clover
init: |
clojure -A:test -P
echo 50505 > .socket-repl-port
mkdir ~/.config/clover
cp .clover/config.cljs ~/.config/clover/
- name: Start REPL
command: clojure -J-Dclojure.server.repl="{:address \"0.0.0.0\" :port 50505 :accept clojure.core.server/repl}" -A:test
- name: See Changes
command: code CHANGELOG.md
github:
prebuilds:
develop: true
next-jdbc-1.3.955/.joker 0000664 0000000 0000000 00000000445 14700603111 0014663 0 ustar 00root root 0000000 0000000 {:known-macros [next.jdbc/with-transaction]
:ignored-unused-namespaces [next.jdbc.connection
next.jdbc.date-time
next.jdbc.prepare
next.jdbc.result-set
next.jdbc.transaction]}
next-jdbc-1.3.955/.vscode/ 0000775 0000000 0000000 00000000000 14700603111 0015106 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/.vscode/settings.json 0000664 0000000 0000000 00000000664 14700603111 0017647 0 ustar 00root root 0000000 0000000 {
"calva.replConnectSequences": [
{
"name": "next.jdbc (Jack-In)",
"projectType": "deps.edn",
"autoSelectForJackIn": true,
"menuSelections": {
"cljAliases": ["1.12", "dev/repl", "portal", "test"]
}
},
{
"name": "next.jdbc (Connect)",
"projectType": "deps.edn",
"autoSelectForConnect": true
}
],
"calva.autoStartRepl": true,
"calva.autoConnectRepl": true
}
next-jdbc-1.3.955/CHANGELOG.md 0000664 0000000 0000000 00000106550 14700603111 0015365 0 ustar 00root root 0000000 0000000 # Change Log
Only accretive/fixative changes will be made from now on.
* 1.3.955 in progress
* Address [#285](https://github.com/seancorfield/next-jdbc/issues/285) by setting the default Clojure version to the earliest supported (1.10.3) to give a better hint to users.
* Update PostgreSQL **Tips & Tricks** example code to fix possible NPE. PR [#284](https://github.com/seancorfield/next-jdbc/pull/284) from [@ExNexu](https://github.com/ExNexu).
* Address [#283](https://github.com/seancorfield/next-jdbc/issues/283) by adding a note in the documentation, linking to the PostgreSQL bug report about `ANY(array)`.
* Address [#269](https://github.com/seancorfield/next-jdbc/issues/269) by adding `:name-fn` as an option (primarily for the SQL builder functions, but also for result set processing); the default is `clojure.core/name` but you can now use `next.jdbc.sql.builder/qualified-name` to preserve the qualifier.
* Update testing deps; `docker-compose` => `docker compose`.
* 1.3.939 -- 2024-05-17
* Fix [#280](https://github.com/seancorfield/next-jdbc/issues/280) by allowing `-` as well as `_` in `nav` foreign key names.
* Address [#279](https://github.com/seancorfield/next-jdbc/issues/279) by adding the missing documentation.
* Address [#278](https://github.com/seancorfield/next-jdbc/issues/278) by fixing link in options page.
* Update dev dependencies, including testing against Clojure 1.12 Alpha 11.
* 1.3.925 -- 2024-03-15
* Address [#275](https://github.com/seancorfield/next-jdbc/issues/275) by noting that PostgreSQL may perform additional SQL queries to produce table names used in qualified result set builders.
* Address [#274](https://github.com/seancorfield/next-jdbc/issues/274) by adding `next.jdbc.sql/aggregate-by-keys` as a convenient wrapper around `find-by-keys` when you want just a single aggregate value back (such as `count`, `max`, etc).
* Address [#273](https://github.com/seancorfield/next-jdbc/issues/273) by linking to [PG2](https://github.com/igrishaev/pg2) in the PostgreSQL **Tips & Tricks** section.
* Address [#268](https://github.com/seancorfield/next-jdbc/issues/268) by expanding the documentation around `insert-multi!` and `insert!`.
* Update dependency versions (including Clojure).
* Code cleanup per `clj-kondo`.
* 1.3.909 -- 2023-12-16
* Address [#267](https://github.com/seancorfield/next-jdbc/issues/267) by adding the `:schema-opts` option to override the default conventions for identifying foreign keys in columns.
* Address [#264](https://github.com/seancorfield/next-jdbc/issues/264) by letting `insert-multi!` accept empty rows (and producing an empty result vector). This improves compatibility with `clojure.java.jdbc`.
* Address [#258](https://github.com/seancorfield/next-jdbc/issues/258) by updating all the library (driver) versions in Getting Started to match the latest versions being tested (from `deps.edn`).
* Update `java.data` to 1.1.103 so that `next.jdbc` no longer has a transitive dependency on `org.clojure/tools.logging`!
* Attempt to clarify that when calling `reduce` on the result of `plan`, you must provide an initial value.
* Expand examples for calling `next.jdbc.sql/find-by-keys` to show `LIKE` and `IN` clauses.
* Update `tools.build` to 0.9.6 (and get rid of `template/pom.xml` in favor of new `:pom-data` option to `b/write-pom`).
* 1.3.894 -- 2023-09-24
* Fix [#257](https://github.com/seancorfield/next-jdbc/issues/257) by making the `fdef` spec for `with-transaction` more permissive. Also add specs for `on-connection` and the `+options` variants of both macros.
* Address [#256](https://github.com/seancorfield/next-jdbc/issues/256) by adding `with-transaction+options` and `on-connection+options`.
* Updates most of the JDBC drivers used for testing, including SQLite 3.43.0.0 which now throws an exception when `.getGeneratedKeys()` is called so you cannot use `:return-generated-keys true` with it but you can add `RETURNING *` to your SQL statements instead (the tests have been updated to reflect this).
* Update `tools.build` to 0.9.5 (and remove `:java-opts` from `build/test`)
* 1.3.883 -- 2023-06-25
* Address [#254](https://github.com/seancorfield/next-jdbc/issues/254) by adding `next.jdbc/active-tx?` and adding more explanation to [**Transactions**](https://cljdoc.org/d/com.github.seancorfield/next.jdbc/CURRENT/doc/getting-started/transactions) about the conventions behind transactions and the limitations of thread-local tracking of active transactions in `next.jdbc`.
* Address [#251](https://github.com/seancorfield/next-jdbc/issues/251) by updating `next.jdbc/with-logging` docstring.
* Update dev/test dependencies.
* 1.3.874 -- 2023-04-15
* Fix [#248](https://github.com/seancorfield/next-jdbc/issues/248) by allowing `:port` to be `:none`.
* Address [#247](https://github.com/seancorfield/next-jdbc/issues/247) by adding examples of using `next.jdbc.connection/jdbc-url` to build a connection string with additional parameters when creating connection pools.
* 1.3.865 -- 2023-03-31
* Fix [#246](https://github.com/seancorfield/next-jdbc/issues/246) by adopting the `strop` function from HoneySQL.
* Address [#245](https://github.com/seancorfield/next-jdbc/issues/245) by not `locking` the `Connection` when `*nested-tx*` is bound to `:ignore` -- improving `clojure.java.jdbc` compatibility.
* Address [#237](https://github.com/seancorfield/next-jdbc/issues/237) by adding an `:init-fn` option to the `db-spec` argument for `next.jdbc.connection/component`.
* 1.3.862 -- 2023-03-13
* Fix [#243](https://github.com/seancorfield/next-jdbc/issues/243) by ensuring URI properties become keywords.
* Fix [#242](https://github.com/seancorfield/next-jdbc/issues/242) by making the logging wrapper aware of the default options wrapper.
* 1.3.858 -- 2023-03-05
* Address [#241](https://github.com/seancorfield/next-jdbc/issues/241) by correcting link to PostgreSQL docs.
* Address [clj-kondo#1685](https://github.com/clj-kondo/clj-kondo/issues/1685) by using `.clj_kondo` extension for hook files.
* Improve docs for SQLite users per [#239](https://github.com/seancorfield/next-jdbc/pull/239) -- [peristeri](https://github.com/peristeri).
* Address [#236](https://github.com/seancorfield/next-jdbc/issues/236) by showing examples of `run!` over `plan`.
* 1.3.847 -- 2022-11-04
* Fix [#232](https://github.com/seancorfield/next-jdbc/issues/232) by using `as-cols` in `insert-multi!` SQL builder. Thanks to @changsu-farmmorning for spotting that bug!
* Fix [#229](https://github.com/seancorfield/next-jdbc/issues/229) by adding `next.jdbc.connect/uri->db-spec` which converts a URI string to a db-spec hash map; in addition, if `DriverManager/getConnection` fails, it assumes it was passed a URI instead of a JDBC URL, and retries after calling that function and then recreating the JDBC URL (which should have the effect of moving the embedded user/password credentials into the properties structure instead of the URL).
* Address [#228](https://github.com/seancorfield/next-jdbc/issues/228) by adding `PreparedStatement` caveat to the Oracle **Tips & Tricks** section.
* Address [#226](https://github.com/seancorfield/next-jdbc/issues/226) by adding a section on exception handling to **Tips & Tricks** (TL;DR: it's all horribly vendor-specific!).
* Add `on-connection` to exported `clj-kondo` configuration.
* Switch `run-test` from `sh` to `bb`.
* 1.3.834 -- 2022-09-23
* Fix [#227](https://github.com/seancorfield/next-jdbc/issues/227) by correcting how [#221](https://github.com/seancorfield/next-jdbc/issues/221) was implemented.
* Address [#224](https://github.com/seancorfield/next-jdbc/issues/224) by attempting to clarify how to use the snake/kebab options and builders.
* 1.3.828 -- 2022-09-11
* Fix [#222](https://github.com/seancorfield/next-jdbc/issues/222) by correcting implementation of `.cons` on a row.
* Address [#221](https://github.com/seancorfield/next-jdbc/issues/221) by supporting `:column-fn` a top-level option in `plan`-related functions to transform keys used in reducing function contexts. Also corrects handling of column names in schema `nav`igation (which previously only supported `:table-fn` and incorrectly applied it to columns as well).
* Address [#218](https://github.com/seancorfield/next-jdbc/issues/218) by moving `:extend-via-metadata true` after the protocols' docstrings.
* Document `:useBulkCopyForBatchInsert` for Microsoft SQL Server via PR [#216](https://github.com/seancorfield/next-jdbc/issues/216) -- [danskarda](https://github.com/danskarda).
* Address [#215](https://github.com/seancorfield/next-jdbc/issues/215) by dropping official support for JDK 8 and updating various JDBC drivers in the testing matrix.
* Address [#214](https://github.com/seancorfield/next-jdbc/issues/214) by updating test/CI versions.
* Address [#212](https://github.com/seancorfield/next-jdbc/issues/212) by documenting the problem with SQLite's JDBC driver.
* Fix [#211](https://github.com/seancorfield/next-jdbc/issues/211) by auto-creating `clojure_test` DB in MySQL if needed; also streamline the CI processes.
* Fix [#210](https://github.com/seancorfield/next-jdbc/issues/210) by updating CI to test against MySQL and SQL Server.
* Switch SQL Server testing setup to `docker-compose`.
* 1.2.796 -- 2022-08-01
* Make `Transactable` extensible via metadata, via PR [#209](https://github.com/seancorfield/next-jdbc/issues/209) -- [@vemv](https://github.com/vemv).
* Fix [#208](https://github.com/seancorfield/next-jdbc/issues/208) by treating unsupported exception as an empty string, just like the JDBC docs say should happen.
* 1.2.790 -- 2022-07-29
* Address [#207](https://github.com/seancorfield/next-jdbc/issues/207) by supporting "db-spec" hash maps containing `:datasource` or `:connection-uri` (this is otherwise undocumented and intended to aid migration from `clojure.java.jdbc`).
* Address [#199](https://github.com/seancorfield/next-jdbc/issues/199) by adding notes on UTC usage -- [@denismccarthykerry](https://github.com/denismccarthykerry).
* Enhance `insert-multi!` to accept a sequence of hash maps and also to support batch execution, via PR [#206](https://github.com/seancorfield/next-jdbc/pull/206) -- [@rschmukler](https://github.com/rschmukler).
* Fix HikariCP pooling example.
* 1.2.780 -- 2022-04-04
* Address [#204](https://github.com/seancorfield/next-jdbc/issues/204) by adding `next.jdbc/on-connection`.
* Address [#203](https://github.com/seancorfield/next-jdbc/issues/203) by adding a note to the **PostgreSQL Tips & Tricks** section.
* Update `build-clj` to v0.8.0.
* 1.2.772 -- 2022-02-09
* To support more tools that perform `datafy`/`nav`, make rows directly `nav`able (even though this is not really the correct behavior).
* Address #193 by expanding the argument specs for `get-datasource` and `get-connection`.
* Streamline `execute-batch!` for `with-options` and `with-logging` (and this should generalize to any wrapper that satisfies `Connectable` and stores the actual `Connection` under the `:connectable` key).
* Update log4j2 test dependency.
* Update `build-clj` to v0.6.7.
* 1.2.761 -- 2021-12-15
* Fix #194 by throwing an exception if a table or column name used with the friendly SQL functions (or the SQL builder functions behind them) contains a "suspicious" character (currently, that's just `;`).
* Update several test dependencies (incl. log4j2).
* Update `build-clj` to v0.6.3.
* 1.2.753 -- 2021-11-17
* Address #187 by adding `clj-kondo.exports` for future expansion (`with-transaction` is already built into `clj-kondo`).
* Documentation updates; `pom.xml` template cleanup.
* Update `build-clj` to v0.5.4.
* 1.2.737 -- 2021-10-17
* Address #186 by updating `java.data` to 1.0.92 and documenting HikariCP's `:dataSourceProperties`.
* Address #184 by improving documentation about `:jdbcUrl`.
* 1.2.731 -- 2021-10-04
* Fix #181 by supporting option-wrapped connectables in `execute-batch!`.
* Address #179 by improving documentation around connection pool initialization.
* Update `build-clj` to v0.5.0.
* 1.2.724 -- 2021-09-25
* Make `next.jdbc` compatible with GraalVM 22+ (PR #178, @FieryCod).
* Address #177 by adding an important performance tip for Oracle.
* Update most of the JDBC drivers for testing; make it easier to test MariaDB's driver;
* 1.2.709 -- 2021-08-30
* Fix #174 by removing `:property-separator` from "etc" map and defaulting H2 to `";"` for this.
* Switch to `tools.build` for running tests and JAR building etc.
* 1.2.689 -- 2021-08-01
* Address #173 by extending `DatafiableRow` to `ResultSet` so there's a public method to call on (rows of) a JDBC result set directly.
* Address #171 by clarifying that you cannot use `clojure.java.jdbc` functions inside `next.jdbc/with-transaction` and discuss how to migrate transaction-based code in the **Migration** guide.
* Address #169 by expanding the description of `with-transaction` in **Getting Started**.
* Cross-link to HoneySQL documentation for JSON/JSONB manipulation.
* Remove superfluous prev/next links in docs (cljdoc does this automatically now).
* Update `depstar`, `test-runner`, and CI versions. Add example `build.clj` to run tests in a subprocess (purely educational).
* 1.2.674 -- 2021-06-16
* Fix #167 by adding `:property-separator` to `next.jdbc.connection/dbtypes` and using it in `jdbc-url`.
* Address #166 by adding `next.jdbc/with-logging` to create a wrapped connectable that will invoke logging functions with the SQL/parameters and optionally the result or exception for each operation.
* Fix `:unit_count` references in **Getting Started** (were `:unit_cost`).
* Update `test-runner`.
* 1.2.659 -- 2021-05-05
* Address #164 by making `clj-commons/camel-snake-kebab` an unconditional dependency. _[Being a conditional dependency that could be brought in at runtime caused problems with GraalVM-based native compilation as well as with multi-project monorepos]_
* Add **Tips & Tricks** section about working with PostgreSQL "interval" types (via PR #163 from @snorremd).
* Address #162 by adding GraalVM to the test matrix (thank you @DeLaGuardo).
* Update several dependency versions.
* 1.1.646 -- 2021-03-15
* Fix #161 by allowing `execute-batch!` to work with datasources and connections, and providing the SQL statement directly.
* 1.1.643 -- 2021-03-06
* Change coordinates to `com.github.seancorfield/next.jdbc` (although new versions will continue to be deployed to `seancorfield/next.jdbc` for a while -- see the [Clojars Verified Group Names policy](https://github.com/clojars/clojars-web/wiki/Verified-Group-Names)).
* Documented `next.jdbc.transaction/*nested-tx*` more thoroughly since that difference from `clojure.java.jdbc` has come up in conversation a few times recently.
* Fix #158 by documenting (and testing) `:allowMultiQueries true` as an option for MySQL/MariaDB to allow multiple statements to be executed and multiple result sets to be returned.
* Fix #157 by copying `next.jdbc.prepare/execute-batch!` to `next.jdbc/execute-batch!` (to avoid a circular dependency that previously relied on requiring `next.jdbc.result-set` at runtime -- which was problematic for GraalVM-based native compilation); **`next.jdbc.prepare/execute-batch!` is deprecated:** it will continue to exist and work, but is no longer documented. In addition, `next.jdbc.prepare/execute-batch!` now relies on a private `volatile!` in order to reference `next.jdbc.result-set/datafiable-result-set` so that it is GraalVM-friendly. Note: code that requires `next.jdbc.prepare` and uses `execute-batch!` without also requiring something that causes `next.jdbc.result-set` to be loaded will no longer return generated keys from `execute-batch!` but that's an almost impossible path since nearly all code that uses `execute-batch!` will have called `next.jdbc/prepare` to get the `PreparedStatement` in the first place.
* 1.1.613 -- 2020-11-05
* Fix #144 by ensuring `camel-snake-case` is properly required before use in an uberjar context.
* 1.1.610 -- 2020-10-19
* Fix #140 by adding `"duckdb"` to `next.jdbc.connection/dbtypes`.
* Change `next.jdbc.types/as-*` functions to use a thunk instead of a vector to convey metadata, so that wrapped values do not get unpacked by HoneySQL.
* Refactor reducing and folding code around `ResultSet`, so that `reducible-result-set` and `foldable-result-set` can be exposed for folks who want more control over processing result sets obtained from database metadata.
* `datafiable-result-set` can now be called without the `connectable` and/or `opts` arguments; a `nil` connectable now disables foreign key navigation in datafied results (rather than throwing an obscure exception).
* 1.1.588 -- 2020-09-09
* Fix #139 by adding `next.jdbc.plan/select-one!` and `next.jdbc.plan/select!`.
* If `ResultSet.getMetaData()` returns `null`, we assume the column count is zero, i.e., an empty result set. This should "never happen" but some JDBC drivers are badly behaved and their idea of an "empty result set" does not match the JDBC API spec.
* 1.1.582 -- 2020-08-05
* Fix #138 by exposing `next.jdbc.connection/jdbc-url` to build `:jdbcUrl` values that can be passed to `->pool` or `component`.
* 1.1.581 -- 2020-08-03
* Fix #137 by adding support for specifying username and password per-connection (if your datasource supports this).
* Document SQLite handling of `bool` and `bit` columns in a new **Tips & Tricks** section, inspired by #134.
* Address #133 by adding `:return-generated-keys` as an option on `execute-batch!`.
* 1.1.569 -- 2020-07-10
* Fix #132 by adding specs for `next.jdbc/with-options` and `next.jdbc.prepare/statement`; correct spec for `next.jdbc.connection/component`. PR #131 from @Briaoeuidhtns.
* Fix #130 by implementing `clojure.lang.ILookup` on the three builder adapters.
* Fix #129 by adding `with-column-value` to `RowBuilder` and a more generic `builder-adapter`.
* Fix #128 by adding a test for the "not found" arity of lookup on mapified result sets.
* Fix #121 by conditionally adding `next.jdbc/snake-kebab-opts`, `next.jdbc/unqualified-snake-kebab-opts`, `next.jdbc.result-set/as-kebab-maps`, and `next.jdbc.result-set/as-unqualified-kebab-maps` (which are present only if `camel-snake-kebab` is on your classpath). _As of 1.2.659, these are included unconditionally and `next.jdbc` depends directly on `camel-snake-kebab`._
* Correct MySQL batch statement rewrite tip: it's `:rewriteBatchedStatements true` (plural). Also surface the batch statement tips in the **Tips & Tricks** page.
* Clarify how combining is interleaving with reducing in **Reducing and Folding with `plan`**.
* Use "JDBC URL" consistently everywhere (instead of "JDBC URI" in several places).
* 1.1.547 -- 2020-06-29
* Address #125 by making the result of `plan` foldable (in the `clojure.core.reducers` sense).
* Address #124 by extending `next.jdbc.sql.builder/for-query` to support `:top` (SQL Server), `:limit` / `:offset` (MySQL/PostgreSQL), `:offset` / `:fetch` (SQL Standard) for `find-by-keys`.
* Address #117 by adding `next.jdbc.transaction/*nested-tx*` to provide control over how attempts to create nested transactions should be handled.
* Address #116 by adding a `:multi-rs` option to `execute!` to retrieve multiple result sets, for example from stored procedure calls or T-SQL scripts.
* Allow `:all` to be passed into `find-by-keys` instead of an example hash map or a where clause vector so all rows will be returned (expected to be used with `:offset` etc to support simple pagination of an entire table).
* Add `:columns` option to `find-by-keys` (and `get-by-id`) to specify a subset of columns to be returned in each row. This can also specify an alias for the column and allows for computed expressions to be selected with an alias.
* 1.0.478 -- 2020-06-24
* Address #123 by adding `next.jdbc.types` namespace, full of auto-generated `as-xxx` functions, one for each of the `java.sql.Types` values.
* 1.0.476 -- 2020-06-22
* Extend default options behavior to `next.jdbc.sql` functions.
* 1.0.475 -- 2020-06-22
* Add tests for `"jtds"` database driver (against MS SQL Server), making it officially supported.
* Switch from OpenTable Embedded PostgreSQL to Zonky's version, so that testing can move forward from PostgreSQL 10.11 to 12.2.0.
* Fix potential reflection warnings caused by `next.jdbc.prepare/statement` being incorrectly type-hinted.
* Address #122 by adding `next.jdbc/with-options` that lets you wrap up a connectable along with default options that should be applied to all operations on that connectable.
* Address #119 by clarifying realization actions in the docstrings for `row-number`, `column-names`, and `metadata`.
* Address #115 by adding equivalent of `db-do-commands` in the `clojure.java.jdbc` migration guide.
* Add log4j2 as a test dependency so that I have better control over logging (which makes debugging easier!).
* 1.0.462 -- 2020-05-31
* Addition of `next.jdbc.datafy` to provide more `datafy`/`nav` introspection (see the additional section in **datafy, nav, and :schema** for details).
* Addition of `next.jdbc.result-set/metadata` to provide (datafied) result set metadata within `plan`.
* 1.0.445 -- 2020-05-23
* Enhanced support in `plan` for "metadata" access: `row-number` and `column-names` can be called on the abstract row (even after calling `datafiable-row`). In addition, `Associative` access via numeric "keys" will read columns by index, and row abstractions now support `Indexed` access via `nth` (which will also read columns by index). Fixes #110.
* Support for Stuart Sierra's Component library, via `next.jdbc.connection/component`. See updated **Getting Started** guide for usage.
* Add example of getting generated keys from `execute-batch!`.
* Add MySQL-specific result set streaming tip.
* Add array handling example to PostgreSQL **Tips & Tricks**. PR #108 from @maxp.
* Investigate possible solutions for #106 (mutable transaction thread safety) -- experimental `locking` on `Connection` object.
* 1.0.424 -- 2020-04-10
* In **Tips & Tricks**, noted that MySQL returns `BLOB` columns as `byte[]` instead of `java.sql.Blob`.
* Address #103, #104 by adding a section on timeouts to **Tips & Tricks**.
* Fix #102 by allowing keywords or strings in `:return-keys`.
* Fix #101 by tightening the spec on a JDBC URL to correctly reflect that it must start with `jdbc:`.
* Add support for calling `.getLoginTimeout`/`.setLoginTimeout` on the reified `DataSource` returned by `get-datasource` when called on a hash map "db-spec" or JDBC URL string.
* Documentation improvements based on feedback (mostly from Slack), including a section on database metadata near the end of **Getting Started**.
* 1.0.409 -- 2020-03-16
* Address #100 by adding support for MariaDB (@green-coder). Set `NEXT_JDBC_TEST_MARIADB=true` as well as `NEXT_JDBC_TEST_MYSQL=true` in order to run tests against MariaDB.
* 1.0.405 -- 2020-03-14 (no code changes -- just documentation)
* Improve documentation around `plan` so that `reduce` etc. is more obvious.
* Attempt to drive readers to cljdoc.org instead of the GitHub version (which is harder to navigate).
* 1.0.395 -- 2020-03-02
* Add `read-as-instant` and `read-as-local` functions to `next.jdbc.date-time` to extend `ReadableColumn` so that SQL `DATE` and `TIMESTAMP` columns can be read as Java Time types.
* Specifically call out PostgreSQL as needing `next.jdbc.date-time` to enable automatic conversion of `java.util.Date` objects to SQL timestamps for prepared statements (#95).
* Split **Tips & Tricks** into its own page, with a whole new section on using JSON data types with PostgreSQL (#94 -- thank you @vharmain).
* Bump dependencies to latest.
* 1.0.384 -- 2020-02-28
* Add PostgreSQL streaming option information to **Tips & Tricks** (#87).
* Minor documentation fixes (including #85, #92, #93).
* Improve `Unknown dbtype` exception message (to clarify that `:classname` is also missing, #90).
* Fix #88 by using 1-arity `keyword` call when table name unavailable (or `:qualifier-fn` returns `nil` or an empty string); also allows `:qualifier-fn` function to be called on empty table name (so `:qualifier-fn (constantly "qual")` will now work much like `clojure.java.jdbc`'s `:qualifier "qual"` worked).
* Address #89, #91 by making minor performance tweaks to `next.jdbc.result-set` functions.
* Planning to move to MAJOR.MINOR.COMMITS versioning scheme (1.0.384).
* 1.0.13 -- 2019-12-20
* Fix #82 by adding `clojure.java.data`-based support for setting arbitrary properties on `Connection` and `PreparedStatement` objects, post-creation. Note: this uses the Java reflection API under the hood.
* Adds `next.jdbc.prepare/statement` to create `Statement` objects with all the options available to `prepare` except `:return-keys`.
* Update `org.clojure/java.data` to 0.1.5 (for property setting).
* Additional clarifications in the documentation based on feedback on Slack.
* 1.0.12 -- 2019-12-11
* Address #81 by splitting the SQL-building functions out of `next.jdbc.sql` into `next.jdbc.sql.builder`.
* Fix #80 by avoiding the auto-commit restore after a failed rollback in a failed transaction.
* Address #78 by documenting the `:connectionInitSql` workaround for HikariCP/PostgreSQL and non-default schemas.
* 1.0.11 -- 2019-12-07
* Fix #76 by avoiding conversions on `java.sql.Date` and `java.sql.Timestamp`.
* Add testing against Microsoft SQL Server (run tests with environment variables `NEXT_JDBC_TEST_MSSQL=yes` and `MSSQL_SA_PASSWORD` set to your local -- `127.0.0.1:1433` -- SQL Server `sa` user password; assumes that it can create and drop `fruit` and `fruit_time` tables in the `model` database).
* Add testing against MySQL (run tests with environment variables `NEXT_JDBC_TEST_MYSQL=yes` and `MYSQL_ROOT_PASSWORD` set to your local -- `127.0.0.1:3306` -- MySQL `root` user password; assumes you have already created an empty database called `clojure_test`).
* Bump several JDBC driver versions for up-to-date testing.
* Minor documentation fixes.
* 1.0.10 -- 2019-11-14
* Fix #75 by adding support for `java.sql.Statement` to `plan`, `execute!`, and `execute-one!`.
* Address #74 by making several small changes to satisfy Eastwood.
* Fix #73 by providing a new, optional namespace `next.jdbc.date-time` that can be required if your database driver needs assistance converting `java.util.Date` (PostgreSQL!) or the Java Time types to SQL `timestamp` (or SQL `date`/`time`).
* Fix link to **All The Options** in **Migration from `clojure.java.jdbc`**. PR #71 (@laurio).
* Address #70 by adding **CLOB & BLOB SQL Types** to the **Tips & Tricks** section of **Friendly SQL Functions** and by adding `next.jdbc.result-set/clob-column-reader` and `next.jdbc.result-set/clob->string` helper to make it easier to deal with `CLOB` column data.
* Clarify what `execute!` and `execute-one!` produce when the result set is empty (`[]` and `nil` respectively, and there are now tests for this). Similarly for `find-by-keys` and `get-by-id`.
* Add **MS SQL Server** section to **Tips & Tricks** to note that it returns an empty string for table names by default (so table-qualified column names are not available). Using the `:result-type` (scroll) and `:concurrency` options will cause table names to be returned.
* Clarify that **Friendly SQL Functions** are deliberately simple (hint: they will not be enhanced or expanded -- use `plan`, `execute!`, and `execute-one!` instead, with a DSL library if you want!).
* Improve migration docs: explicitly recommend the use of a datasource for code that needs to work with both `clojure.java.jdbc` and `next.jdbc`; add caveats about column name conflicts (in several places).
* Improve `datafy`/`nav` documentation around `:schema`.
* Update `org.clojure/java.data` to `"0.1.4"` (0.1.2 fixes a number of reflection warnings).
* 1.0.9 -- 2019-10-11
* Address #69 by trying to clarify when to use `execute-one!` vs `execute!` vs `plan`.
* Address #68 by clarifying that builder functions do not affect the "fake result set" containing `:next.jdbc/update-count`.
* Fix #67 by adding `:jdbcUrl` version spec.
* Add `next.jdbc.optional/as-maps-adapter` to provide a way to override the default result set reading behavior of using `.getObject` when omitting SQL `NULL` values from result set maps.
* 1.0.8 -- 2019-09-27
* Fix #66 by adding support for a db-spec hash map format containing a `:jdbcUrl` key (consistent with `->pool`) so that you can create a datasource from a JDBC URL string and additional options.
* Address #65 by adding a HugSQL "quick start" to the **Friendly SQL Functions** section of the docs.
* Add `next.jdbc.specs/unstrument`. PR #64 (@gerred).
* Address #63 by improving documentation around qualified column names and `:qualifier` (`clojure.java.jdbc`) migration, with a specific caveat about Oracle not fully supporting `.getTableName()`.
* 1.0.7 -- 2019-09-09
* Address #60 by supporting simpler schema entry formats: `:table/column` is equivalent to the old `[:table :column :one]` and `[:table/column]` is equivalent to the old `[:table :column :many]`. The older formats will continue to be supported but should be considered deprecated. PR #62 (@seancorfield).
* Added test for using `ANY(?)` and arrays in PostgreSQL for `IN (?,,,?)` style queries. Added a **Tips & Tricks** section to **Friendly SQL Functions** with database-specific suggestions, that starts with this one.
* Improved documentation in several areas.
* 1.0.6 -- 2019-08-24
* Improved documentation around `insert-multi!` and `execute-batch!` (addresses #57).
* Fix #54 by improving documentation around data type conversions (and the `ReadableColumn` and `SettableParameter` protocols).
* Fix #52 by using a US-locale function in the "lower" result set builders to avoid unexpected character changes in column names in locales such as Turkish. If you want the locale-sensitive behavior, pass `clojure.string/lower-case` into one of the "modified" result set builders.
* Add `next.jdbc.result-set/as-maps-adapter` and `next.jdbc.result-set/as-arrays-adapter` to provide a way to override the default result set reading behavior of using `.getObject`.
* Update `org.clojure/test.check` to `"0.10.0"`.
* 1.0.5 -- 2019-08-05
* Fix #51 by implementing `IPersistentMap` fully for the "mapified" result set inside `plan`. This adds support for `dissoc` and `cons` (which will both realize a row), `count` (which returns the column count but does not realize a row), `empty` (returns an empty hash map without realizing a row), etc.
* Improved documentation around connection pooling (HikariCP caveats).
* 1.0.4 -- 2019-07-24
* Fix #50 by adding machinery to test against (embedded) PostgreSQL!
* Improved documentation for connection pooled datasources (including adding a Component example); clarified the recommendations for globally overriding default options (write a wrapper namespace that suits your usage).
* Note: this release is primarily to fix the cljdoc.org documentation via repackaging the JAR file.
* 1.0.3 -- 2019-07-23
* Fix #48 by adding `next.jdbc.connection/->pool` and documenting how to use HikariCP and c3p0 in the Getting Started docs (as well as adding tests for both libraries).
* Documentation improvements, including examples of extending `ReadableColumn` and `SettableParameter`.
* Updated test dependencies (testing against more recent versions of several drivers).
* 1.0.2 -- 2019-07-15
* Fix #47 by refactoring database specs to be a single hash map instead of pouring multiple maps into one.
* Fix #46 by allowing `:host` to be `:none` which tells `next.jdbc` to omit the host/port section of the JDBC URL, so that local databases can be used with `:dbtype`/`:classname` for database types that `next.jdbc` does not know. Also added `:dbname-separator` and `:host-prefix` to the "db-spec" to allow fine-grained control over how the JDBC URL is assembled.
* Fix #45 by adding [TimesTen](https://www.oracle.com/database/technologies/related/timesten.html) driver support.
* Fix #44 so that `insert-multi!` with an empty `rows` vector returns `[]`.
* Fix #43 by adjusting the spec for `insert-multi!` to "require less" of the `cols` and `rows` arguments.
* Fix #42 by adding specs for `execute-batch!` and `set-parameters` in `next.jdbc.prepare`.
* Fix #41 by improving docstrings and documentation, especially around prepared statement handling.
* Fix #40 by adding `next.jdbc/execute-batch!` (previously `next.jdbc.prepare/execute-batch!`).
* Added `assert`s in `next.jdbc.sql` as more informative errors for cases that would generate SQL exceptions (from malformed SQL).
* Added spec for `:order-by` to reflect what is actually permitted.
* Expose `next.jdbc.connect/dbtypes` as a table of known database types and aliases, along with their class name(s), port, and other JDBC string components.
* 1.0.1 -- 2019-07-03
* Fix #37 by adjusting the spec for `with-transaction` to "require less" of the `:binding` vector.
* Fix #36 by adding type hint in `with-transaction` macro.
* Fix #35 by explaining the database-specific options needed to ensure `insert-multi!` performs a single, batched operation.
* Fix #34 by explaining save points (in the Transactions documentation).
* Fix #33 by updating the spec for the example `key-map` in `find-by-keys`, `update!`, and `delete!` to reflect that you cannot pass an empty map to these functions (and added tests to ensure the calls fail with spec errors).
* 1.0.0 "gold" -- 2019-06-12
* Address #31 by making `reify`'d objects produce a more informative string representation if they are printed (e.g., misusing `plan` by not reducing it or not mapping an operation over the rows).
* Fix #26 by exposing `next.jdbc.result-set/datafiable-result-set` so that various `java.sql.DatabaseMetaData` methods that return result metadata information in `ResultSet`s can be easily turned into a fully realized result set.
* 1.0.0-rc1 -- 2019-06-04
* Fix #24 by adding return type hints to `next.jdbc` functions.
* Fix #22 by adding `next.jdbc.optional` with six map builders that omit `NULL` columns from the row hash maps.
* Documentation improvements (#27, #28, and #29), including changing "connectable" to "transactable" for the `transact` function and the `with-transaction` macro (for consistency with the name of the underlying protocol).
* Fix #30 by adding `modified` variants of column name functions and builders. The `lower` variants have been rewritten in terms of these new `modified` variants. This adds `:label-fn` and `:qualifier-fn` options that mirror `:column-fn` and `:table-fn` for row builders.
* 1.0.0-beta1 -- 2019-05-24
* Set up CircleCI testing (just local DBs for now).
* Address #21 by adding `next.jdbc.specs` and documenting basic usage.
* Fix #19 by caching loaded database driver classes.
* Address #16 by renaming `reducible!` to `plan` (**BREAKING CHANGE!**).
* Address #3 by deciding to maintain this library outside Clojure Contrib.
## Alpha Builds
* 1.0.0-alpha13 -- 2019-05-04 -- Fix #18 by removing more keys from properties when creating connections.
* 1.0.0-alpha12 -- 2019-04-26 -- Fix #17 by renaming `:next.jdbc/sql-string` to `:next.jdbc/sql-params` (**BREAKING CHANGE!**) and pass whole vector.
* 1.0.0-alpha11 -- 2019-04-24 -- Rename `:gen-fn` to `:builder-fn` (**BREAKING CHANGE!**); Fix #13 by adding documentation for `datafy`/`nav`/`:schema`; Fix #15 by automatically adding `:next.jdbc/sql-string` (as of 1.0.0-alpha12: `:next.jdbc/sql-params`) into the options hash map, so custom builders can depend on the SQL string.
* 1.0.0-alpha9 -- 2019-04-22 -- Fix #14 by respecting `:gen-fn` (as of 1.0.0-alpha11: `:builder-fn`) in `execute-one!` for `PreparedStatement`.
* 1.0.0-alpha8 -- 2019-04-21 -- Initial publicly announced release.
next-jdbc-1.3.955/CODE_OF_CONDUCT.md 0000664 0000000 0000000 00000006425 14700603111 0016353 0 ustar 00root root 0000000 0000000 # Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at sean@corfield.org. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq
next-jdbc-1.3.955/CONTRIBUTING.md 0000664 0000000 0000000 00000001174 14700603111 0016001 0 ustar 00root root 0000000 0000000 # Contributing to `next.jdbc`
Feel free to [open Issues](https://github.com/seancorfield/next-jdbc/issues) with opinions and suggestions -- I'm happy to have discussions about any aspect of the library!
I welcome Pull Requests for source code changes *that are accompanied by tests*, and for any documentation changes. For any substantial change, please open an issue for discussion first, or find me in the `#sql` stream on Clojurians Zulip or the `#sql` channel on Clojurians Slack to chat about it.
In particular, **no breaking changes will be entertained**. All future changes must either be purely accretive or purely fixative.
next-jdbc-1.3.955/LICENSE 0000664 0000000 0000000 00000025722 14700603111 0014562 0 ustar 00root root 0000000 0000000 THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC
LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM
CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
1. DEFINITIONS
"Contribution" means:
a) in the case of the initial Contributor, the initial code and
documentation distributed under this Agreement, and
b) in the case of each subsequent Contributor:
i) changes to the Program, and
ii) additions to the Program;
where such changes and/or additions to the Program originate from and are
distributed by that particular Contributor. A Contribution 'originates' from
a Contributor if it was added to the Program by such Contributor itself or
anyone acting on such Contributor's behalf. Contributions do not include
additions to the Program which: (i) are separate modules of software
distributed in conjunction with the Program under their own license
agreement, and (ii) are not derivative works of the Program.
"Contributor" means any person or entity that distributes the Program.
"Licensed Patents" mean patent claims licensable by a Contributor which are
necessarily infringed by the use or sale of its Contribution alone or when
combined with the Program.
"Program" means the Contributions distributed in accordance with this
Agreement.
"Recipient" means anyone who receives the Program under this Agreement,
including all Contributors.
2. GRANT OF RIGHTS
a) Subject to the terms of this Agreement, each Contributor hereby grants
Recipient a non-exclusive, worldwide, royalty-free copyright license to
reproduce, prepare derivative works of, publicly display, publicly perform,
distribute and sublicense the Contribution of such Contributor, if any, and
such derivative works, in source code and object code form.
b) Subject to the terms of this Agreement, each Contributor hereby grants
Recipient a non-exclusive, worldwide, royalty-free patent license under
Licensed Patents to make, use, sell, offer to sell, import and otherwise
transfer the Contribution of such Contributor, if any, in source code and
object code form. This patent license shall apply to the combination of the
Contribution and the Program if, at the time the Contribution is added by the
Contributor, such addition of the Contribution causes such combination to be
covered by the Licensed Patents. The patent license shall not apply to any
other combinations which include the Contribution. No hardware per se is
licensed hereunder.
c) Recipient understands that although each Contributor grants the licenses
to its Contributions set forth herein, no assurances are provided by any
Contributor that the Program does not infringe the patent or other
intellectual property rights of any other entity. Each Contributor disclaims
any liability to Recipient for claims brought by any other entity based on
infringement of intellectual property rights or otherwise. As a condition to
exercising the rights and licenses granted hereunder, each Recipient hereby
assumes sole responsibility to secure any other intellectual property rights
needed, if any. For example, if a third party patent license is required to
allow Recipient to distribute the Program, it is Recipient's responsibility
to acquire that license before distributing the Program.
d) Each Contributor represents that to its knowledge it has sufficient
copyright rights in its Contribution, if any, to grant the copyright license
set forth in this Agreement.
3. REQUIREMENTS
A Contributor may choose to distribute the Program in object code form under
its own license agreement, provided that:
a) it complies with the terms and conditions of this Agreement; and
b) its license agreement:
i) effectively disclaims on behalf of all Contributors all warranties and
conditions, express and implied, including warranties or conditions of title
and non-infringement, and implied warranties or conditions of merchantability
and fitness for a particular purpose;
ii) effectively excludes on behalf of all Contributors all liability for
damages, including direct, indirect, special, incidental and consequential
damages, such as lost profits;
iii) states that any provisions which differ from this Agreement are offered
by that Contributor alone and not by any other party; and
iv) states that source code for the Program is available from such
Contributor, and informs licensees how to obtain it in a reasonable manner on
or through a medium customarily used for software exchange.
When the Program is made available in source code form:
a) it must be made available under this Agreement; and
b) a copy of this Agreement must be included with each copy of the Program.
Contributors may not remove or alter any copyright notices contained within
the Program.
Each Contributor must identify itself as the originator of its Contribution,
if any, in a manner that reasonably allows subsequent Recipients to identify
the originator of the Contribution.
4. COMMERCIAL DISTRIBUTION
Commercial distributors of software may accept certain responsibilities with
respect to end users, business partners and the like. While this license is
intended to facilitate the commercial use of the Program, the Contributor who
includes the Program in a commercial product offering should do so in a
manner which does not create potential liability for other Contributors.
Therefore, if a Contributor includes the Program in a commercial product
offering, such Contributor ("Commercial Contributor") hereby agrees to defend
and indemnify every other Contributor ("Indemnified Contributor") against any
losses, damages and costs (collectively "Losses") arising from claims,
lawsuits and other legal actions brought by a third party against the
Indemnified Contributor to the extent caused by the acts or omissions of such
Commercial Contributor in connection with its distribution of the Program in
a commercial product offering. The obligations in this section do not apply
to any claims or Losses relating to any actual or alleged intellectual
property infringement. In order to qualify, an Indemnified Contributor must:
a) promptly notify the Commercial Contributor in writing of such claim, and
b) allow the Commercial Contributor to control, and cooperate with the
Commercial Contributor in, the defense and any related settlement
negotiations. The Indemnified Contributor may participate in any such claim
at its own expense.
For example, a Contributor might include the Program in a commercial product
offering, Product X. That Contributor is then a Commercial Contributor. If
that Commercial Contributor then makes performance claims, or offers
warranties related to Product X, those performance claims and warranties are
such Commercial Contributor's responsibility alone. Under this section, the
Commercial Contributor would have to defend claims against the other
Contributors related to those performance claims and warranties, and if a
court requires any other Contributor to pay any damages as a result, the
Commercial Contributor must pay those damages.
5. NO WARRANTY
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON
AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER
EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR
CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A
PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the
appropriateness of using and distributing the Program and assumes all risks
associated with its exercise of rights under this Agreement , including but
not limited to the risks and costs of program errors, compliance with
applicable laws, damage to or loss of data, programs or equipment, and
unavailability or interruption of operations.
6. DISCLAIMER OF LIABILITY
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY
CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION
LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE
EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGES.
7. GENERAL
If any provision of this Agreement is invalid or unenforceable under
applicable law, it shall not affect the validity or enforceability of the
remainder of the terms of this Agreement, and without further action by the
parties hereto, such provision shall be reformed to the minimum extent
necessary to make such provision valid and enforceable.
If Recipient institutes patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Program itself
(excluding combinations of the Program with other software or hardware)
infringes such Recipient's patent(s), then such Recipient's rights granted
under Section 2(b) shall terminate as of the date such litigation is filed.
All Recipient's rights under this Agreement shall terminate if it fails to
comply with any of the material terms or conditions of this Agreement and
does not cure such failure in a reasonable period of time after becoming
aware of such noncompliance. If all Recipient's rights under this Agreement
terminate, Recipient agrees to cease use and distribution of the Program as
soon as reasonably practicable. However, Recipient's obligations under this
Agreement and any licenses granted by Recipient relating to the Program shall
continue and survive.
Everyone is permitted to copy and distribute copies of this Agreement, but in
order to avoid inconsistency the Agreement is copyrighted and may only be
modified in the following manner. The Agreement Steward reserves the right to
publish new versions (including revisions) of this Agreement from time to
time. No one other than the Agreement Steward has the right to modify this
Agreement. The Eclipse Foundation is the initial Agreement Steward. The
Eclipse Foundation may assign the responsibility to serve as the Agreement
Steward to a suitable separate entity. Each new version of the Agreement will
be given a distinguishing version number. The Program (including
Contributions) may always be distributed subject to the version of the
Agreement under which it was received. In addition, after a new version of
the Agreement is published, Contributor may elect to distribute the Program
(including its Contributions) under the new version. Except as expressly
stated in Sections 2(a) and 2(b) above, Recipient receives no rights or
licenses to the intellectual property of any Contributor under this
Agreement, whether expressly, by implication, estoppel or otherwise. All
rights in the Program not expressly granted under this Agreement are
reserved.
This Agreement is governed by the laws of the State of New York and the
intellectual property laws of the United States of America. No party to this
Agreement will bring a legal action under this Agreement more than one year
after the cause of action arose. Each party waives its rights to a jury trial
in any resulting litigation.
next-jdbc-1.3.955/README.md 0000664 0000000 0000000 00000031334 14700603111 0015030 0 ustar 00root root 0000000 0000000 # next.jdbc [](https://github.com/seancorfield/next-jdbc/actions/workflows/test-and-release.yml) [](https://github.com/seancorfield/next-jdbc/actions/workflows/test-and-snapshot.yml) [](https://github.com/seancorfield/next-jdbc/actions/workflows/test.yml)
The next generation of `clojure.java.jdbc`: a new low-level Clojure wrapper for JDBC-based access to databases.
**Featured in [Jacek Schae's Learn Reitit Pro online course](https://www.jacekschae.com/learn-reitit-pro/pfec2)!**
## TL;DR
The latest versions on Clojars and on cljdoc:
[](https://clojars.org/com.github.seancorfield/next.jdbc)
[](https://cljdoc.org/d/com.github.seancorfield/next.jdbc/CURRENT)
[](https://clojurians.slack.com/app_redirect?channel=sql)
[](http://clojurians.net)
The documentation on [cljdoc.org](https://cljdoc.org/d/com.github.seancorfield/next.jdbc/CURRENT) is for the current version of `next.jdbc`:
* [Getting Started](https://cljdoc.org/d/com.github.seancorfield/next.jdbc/CURRENT/doc/getting-started)
* [API Reference](https://cljdoc.org/d/com.github.seancorfield/next.jdbc/CURRENT/api/next)
* [Migrating from `clojure.java.jdbc`](https://cljdoc.org/d/com.github.seancorfield/next.jdbc/CURRENT/doc/migration-from-clojure-java-jdbc)
* Feedback via [issues](https://github.com/seancorfield/next-jdbc/issues) or in the [`#sql` channel on the Clojurians Slack](https://clojurians.slack.com/messages/C1Q164V29/) or the [`#sql` stream on the Clojurians Zulip](https://clojurians.zulipchat.com/#narrow/stream/152063-sql).
The documentation on GitHub is for **develop** since the 1.3.955 release -- [see the CHANGELOG](https://github.com/seancorfield/next-jdbc/blob/develop/CHANGELOG.md) and then read the [corresponding updated documentation](https://github.com/seancorfield/next-jdbc/tree/develop/doc) on GitHub if you want. Older versions of `next.jdbc` were published under the `seancorfield` group ID and you can find [older seancorfield/next.jdbc documentation on cljdoc.org](https://cljdoc.org/versions/seancorfield/next.jdbc).
This project follows the version scheme MAJOR.MINOR.COMMITS where MAJOR and MINOR provide some relative indication of the size of the change, but do not follow semantic versioning. In general, all changes endeavor to be non-breaking (by moving to new names rather than by breaking existing names). COMMITS is an ever-increasing counter of commits since the beginning of this repository.
> Note: every commit to the **develop** branch runs CI (GitHub Actions) and successful runs push a MAJOR.MINOR.999-SNAPSHOT build to Clojars so the very latest version of `next.jdbc` is always available either via that [snapshot on Clojars](https://clojars.org/com.github.seancorfield/next.jdbc) or via a git dependency on the latest SHA.
## Motivation
Why another JDBC library? Why a different API from `clojure.java.jdbc`?
* Performance: there's a surprising amount of overhead in how `ResultSet` objects are converted to sequences of hash maps in `clojure.java.jdbc` – which can be really noticeable for large result sets – so I wanted a better way to handle that. There's also quite a bit of overhead and complexity in all the conditional logic and parsing that is associated with `db-spec`-as-hash-map.
* A more modern API, based on using qualified keywords and transducers etc: `:qualifier` and `reducible-query` in recent `clojure.java.jdbc` versions were steps toward that but there's a lot of "legacy" API in the library and I want to present a more focused, more streamlined API so folks naturally use the `IReduceInit` / transducer approach from day one and benefit from qualified keywords.
* Simplicity: `clojure.java.jdbc` uses a variety of ways to execute SQL which can lead to inconsistencies and surprises – `query`, `execute!`, and `db-do-commands` are all different ways to execute different types of SQL statement so you have to remember which is which and you often have to watch out for restrictions in the underlying JDBC API.
Those were my three primary drivers. In addition, the `db-spec`-as-hash-map approach in `clojure.java.jdbc` has caused a lot of frustration and confusion in the past, especially with the wide range of conflicting options that are supported. `next.jdbc` is heavily protocol-based so it's easier to mix'n'match how you use it with direct Java JDBC code (and the protocol-based approach contributes to the improved performance overall). There's a much clearer path of `db-spec` -> `DataSource` -> `Connection` now, which should steer people toward more connection reuse and better performing apps.
I also wanted `datafy`/`nav` support baked right in (it was added to `clojure.java.jdbc` back in December 2018 as an undocumented, experimental API in a separate namespace). It is the default behavior for `execute!` and `execute-one!`. The protocol-based function `next.jdbc.result-set/datafiable-row` can be used with `plan` if you need to add `datafy`/`nav` support to rows you are creating in your reduction.
As `next.jdbc` moved from alpha to beta, the last breaking change was made (renaming `reducible!` to `plan`) and the API should be considered stable. Only accretive and fixative changes will be made from now on.
After a month of alpha builds being available for testing, the first beta build was released on May 24th, 2019. A release candidate followed on June 4th and the "gold" (1.0.0) release was on June 12th. In addition to the small, core API in `next.jdbc`, there are "syntactic sugar" SQL functions (`insert!`, `query`, `update!`, and `delete!`) available in `next.jdbc.sql` that are similar to the main API in `clojure.java.jdbc`. See [Migrating from `clojure.java.jdbc`](https://cljdoc.org/d/com.github.seancorfield/next.jdbc/CURRENT/doc/migration-from-clojure-java-jdbc) for more detail about the differences.
## Usage
The primary concepts behind `next.jdbc` are that you start by producing a `javax.sql.DataSource`. You can create a pooled datasource object using your preferred library (c3p0, hikari-cp, etc). You can use `next.jdbc`'s `get-datasource` function to create a `DataSource` from a `db-spec` hash map or from a JDBC URL (string). The underlying protocol, `Sourceable`, can be extended to allow more things to be turned into a `DataSource` (and can be extended via metadata on an object as well as via types).
From a `DataSource`, either you or `next.jdbc` can create a `java.sql.Connection` via the `get-connection` function. You can specify an options hash map to `get-connection` to modify the connection that is created: `:read-only`, `:auto-commit`.
The primary SQL execution API in `next.jdbc` is:
* `plan` -- yields an `IReduceInit` that, when reduced with an initial value, executes the SQL statement and then reduces over the `ResultSet` with as little overhead as possible.
* `execute!` -- executes the SQL statement and produces a vector of realized hash maps, that use qualified keywords for the column names, of the form `:
/`. If you join across multiple tables, the qualified keywords will reflect the originating tables for each of the columns. If the SQL produces named values that do not come from an associated table, a simple, unqualified keyword will be used. The realized hash maps returned by `execute!` are `Datafiable` and thus `Navigable` (see Clojure 1.10's `datafy` and `nav` functions, and tools like [Portal](https://github.com/djblue/portal), [Reveal](https://github.com/vlaaad/reveal), and Cognitect's REBL). Alternatively, you can specify `{:builder-fn rs/as-arrays}` and produce a vector with column names followed by vectors of row values. `rs/as-maps` is the default for `:builder-fn` but there are also `rs/as-unqualified-maps` and `rs/as-unqualified-arrays` if you want unqualified `:` column names (and there are also lower-case variants of all of these).
* `execute-one!` -- executes the SQL or DDL statement and produces a single realized hash map. The realized hash map returned by `execute-one!` is `Datafiable` and thus `Navigable`.
In addition, there are API functions to create `PreparedStatement`s (`prepare`) from `Connection`s, which can be passed to `plan`, `execute!`, or `execute-one!`, and to run code inside a transaction (the `transact` function and the `with-transaction` macro).
Since `next.jdbc` uses raw Java JDBC types, you can use `with-open` directly to reuse connections and ensure they are cleaned up correctly:
```clojure
(let [my-datasource (jdbc/get-datasource {:dbtype "..." :dbname "..." ...})]
(with-open [connection (jdbc/get-connection my-datasource)]
(jdbc/execute! connection [...])
(reduce my-fn init-value (jdbc/plan connection [...]))
(jdbc/execute! connection [...])))
```
### Usage scenarios
There are three intended usage scenarios that have driven the design of the API:
* Execute a SQL statement and process it in a single eager operation, which may allow for the results to be streamed from the database (how to persuade JDBC to do that is database-specific!), and which cleans up resources before returning the result -- even if the reduction is short-circuited via `reduced`. This usage is supported by `plan`. This is likely to be the fastest approach and should be the first option you consider for SQL queries.
* Execute a SQL or DDL statement to obtain a single, fully-realized, `Datafiable` hash map that represents either the first row from a `ResultSet`, the first generated keys result (again, from a `ResultSet`), or the first result where neither of those are available (`next.jdbc` yields `{:next.jdbc/update-count N}` when it can only return an update count). This usage is supported by `execute-one!`. This is probably your best choice for most non-query operations.
* Execute a SQL statement to obtain a fully-realized, `Datafiable` result set -- a vector of hash maps. This usage is supported by `execute!`. You can also produce a vector of column names/row values (`next.jdbc.result-set/as-arrays`).
In addition, convenience functions -- "syntactic sugar" -- are provided to insert rows, run queries, update rows, and delete rows, using the same names as in `clojure.java.jdbc`. These are in `next.jdbc.sql` since they involve SQL creation -- they are not considered part of the core API.
## More Detailed Documentation
* [Getting Started](/doc/getting-started.md)
* [Friendly SQL Functions](/doc/friendly-sql-functions.md)
* [Tips & Tricks](/doc/tips-and-tricks.md)
* [Result Set Builders](/doc/result-set-builders.md)
* [Prepared Statements](/doc/prepared-statements.md)
* [Transactions](/doc/transactions.md)
* [All The Options](/doc/all-the-options.md)
* [`datafy`, `nav`, and `:schema`](/doc/datafy-nav-and-schema.md)
* [Migration from `clojure.java.jdbc`](/doc/migration-from-clojure-java-jdbc.md)
## License
Copyright © 2018-2021 Sean Corfield
Distributed under the Eclipse Public License version 1.0.
next-jdbc-1.3.955/build.clj 0000664 0000000 0000000 00000005244 14700603111 0015343 0 ustar 00root root 0000000 0000000 (ns build
"next.jdbc's build script.
clojure -T:build ci
clojure -T:build deploy
Run tests via:
clojure -X:test
For more information, run:
clojure -A:deps -T:build help/doc"
(:refer-clojure :exclude [test])
(:require [clojure.tools.build.api :as b]
[deps-deploy.deps-deploy :as dd]))
(def lib 'com.github.seancorfield/next.jdbc)
(defn- the-version [patch] (format "1.3.%s" patch))
(def version (the-version (b/git-count-revs nil)))
(def snapshot (the-version "999-SNAPSHOT"))
(def class-dir "target/classes")
(defn test "Run all the tests." [opts]
(doseq [alias [:1.10 :1.11 :1.12]]
(println "\nRunning tests for Clojure" (name alias))
(let [basis (b/create-basis {:aliases [:test alias]})
cmds (b/java-command
{:basis basis
:main 'clojure.main
:main-args ["-m" "cognitect.test-runner"]})
{:keys [exit]} (b/process cmds)]
(when-not (zero? exit) (throw (ex-info "Tests failed" {})))))
opts)
(defn- pom-template [version]
[[:description "The next generation of clojure.java.jdbc: a new low-level Clojure wrapper for JDBC-based access to databases."]
[:url "https://github.com/seancorfield/next-jdbc"]
[:licenses
[:license
[:name "Eclipse Public License"]
[:url "http://www.eclipse.org/legal/epl-v10.html"]]]
[:developers
[:developer
[:name "Sean Corfield"]]]
[:scm
[:url "https://github.com/seancorfield/next-jdbc"]
[:connection "scm:git:https://github.com/seancorfield/next-jdbc.git"]
[:developerConnection "scm:git:ssh:git@github.com:seancorfield/next-jdbc.git"]
[:tag (str "v" version)]]])
(defn- jar-opts [opts]
(let [version (if (:snapshot opts) snapshot version)]
(assoc opts
:lib lib :version version
:jar-file (format "target/%s-%s.jar" lib version)
:basis (b/create-basis {})
:class-dir class-dir
:target "target"
:src-dirs ["src"]
:pom-data (pom-template version))))
(defn ci "Run the CI pipeline of tests (and build the JAR)." [opts]
(test opts)
(b/delete {:path "target"})
(let [opts (jar-opts opts)]
(println "\nWriting pom.xml...")
(b/write-pom opts)
(println "\nCopying source...")
(b/copy-dir {:src-dirs ["resources" "src"] :target-dir class-dir})
(println "\nBuilding" (:jar-file opts) "...")
(b/jar opts))
opts)
(defn deploy "Deploy the JAR to Clojars." [opts]
(let [{:keys [jar-file] :as opts} (jar-opts opts)]
(dd/deploy {:installer :remote :artifact (b/resolve-path jar-file)
:pom-file (b/pom-path (select-keys opts [:lib :class-dir]))}))
opts)
next-jdbc-1.3.955/deps.edn 0000664 0000000 0000000 00000006172 14700603111 0015176 0 ustar 00root root 0000000 0000000 {:mvn/repos {"sonatype" {:url "https://oss.sonatype.org/content/repositories/snapshots/"}}
:paths ["src" "resources"]
:deps {org.clojure/clojure {:mvn/version "1.10.3"}
org.clojure/java.data {:mvn/version "1.2.107"}
camel-snake-kebab/camel-snake-kebab {:mvn/version "0.4.3"}}
:aliases
{;; for help: clojure -A:deps -T:build help/doc
:build {:deps {io.github.clojure/tools.build {:mvn/version "0.10.5"}
slipset/deps-deploy {:mvn/version "0.2.2"}}
:ns-default build}
;; versions to test against:
:1.10 {:override-deps {org.clojure/clojure {:mvn/version "1.10.3"}}}
:1.11 {:override-deps {org.clojure/clojure {:mvn/version "1.11.4"}}}
:1.12 {:override-deps {org.clojure/clojure {:mvn/version "1.12.0"}}}
;; running tests/checks of various kinds:
:test {:extra-paths ["test"] ; can also run clojure -X:test
:extra-deps {org.clojure/test.check {:mvn/version "1.1.1"}
io.github.cognitect-labs/test-runner
{:git/tag "v0.5.1" :git/sha "dfb30dd"}
;; connection pooling
com.zaxxer/HikariCP {:mvn/version "6.0.0"}
com.mchange/c3p0 {:mvn/version "0.10.1"}
;; JDBC drivers
;; 10.16.x is JDK17+
org.apache.derby/derby {:mvn/version "10.15.2.0"}
org.apache.derby/derbyshared {:mvn/version "10.15.2.0"}
org.hsqldb/hsqldb {:mvn/version "2.7.3"}
com.h2database/h2 {:mvn/version "2.3.232"}
net.sourceforge.jtds/jtds {:mvn/version "1.3.1"}
org.mariadb.jdbc/mariadb-java-client {:mvn/version "3.4.1"}
com.mysql/mysql-connector-j {:mvn/version "9.0.0"}
;; 42.7.4 changes update count (to -1) for stored procs:
org.postgresql/postgresql {:mvn/version "42.7.4"}
io.zonky.test/embedded-postgres {:mvn/version "2.0.7"}
io.zonky.test.postgres/embedded-postgres-binaries-darwin-amd64 {:mvn/version "17.0.0"}
io.zonky.test.postgres/embedded-postgres-binaries-linux-amd64 {:mvn/version "17.0.0"}
io.zonky.test.postgres/embedded-postgres-binaries-windows-amd64 {:mvn/version "17.0.0"}
org.xerial/sqlite-jdbc {:mvn/version "3.46.1.3"}
com.microsoft.sqlserver/mssql-jdbc {:mvn/version "12.8.1.jre11"}
;; use log4j2 to reduce log noise during testing:
org.apache.logging.log4j/log4j-api {:mvn/version "2.24.0"}
;; bridge everything into log4j:
org.apache.logging.log4j/log4j-1.2-api {:mvn/version "2.24.0"}
org.apache.logging.log4j/log4j-jcl {:mvn/version "2.24.0"}
org.apache.logging.log4j/log4j-jul {:mvn/version "2.24.0"}
org.apache.logging.log4j/log4j-slf4j-impl {:mvn/version "2.24.0"}}
:jvm-opts ["-Dlog4j2.configurationFile=log4j2-info.properties"]
:exec-fn cognitect.test-runner.api/test}}}
next-jdbc-1.3.955/doc/ 0000775 0000000 0000000 00000000000 14700603111 0014312 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/doc/all-the-options.md 0000664 0000000 0000000 00000036050 14700603111 0017657 0 ustar 00root root 0000000 0000000 # `next.jdbc` Options
This section documents all of the options that are supported by all of the functions in `next.jdbc`. Nearly every function accepts an optional hash map as the last argument, that can control many of the behaviors of the library.
The most general options are described first, followed by more specific options that apply only to certain functions.
## Datasources and Connections
Although `get-datasource` does not accept options, the "db spec" hash map passed in may contain the following options:
* `:dbtype` -- a string that identifies the type of JDBC database being used,
* `:dbname` -- a string that identifies the name of the actual database being used,
* `:dbname-separator` -- an optional string that can be used to override the `/` or `:` that is normally placed in front of the database name in the JDBC URL,
* `:host` -- an optional string that identifies the IP address or hostname of the server on which the database is running; the default is `"127.0.0.1"`; if `:none` is specified, `next.jdbc` will assume this is for a local database and will omit the host/port segment of the JDBC URL,
* `:host-prefix` -- an optional string that can be used to override the `//` that is normally placed in front of the IP address or hostname in the JDBC URL,
* `:port` -- an optional integer that identifies the port on which the database is running; for common database types, `next.jdbc` knows the default so this should only be needed for non-standard setups or "exotic" database types; if `:none` is specified, `next.jdbc` will omit the port segment of the JDBC URL,
* `:property-separator` -- an optional string that can be used to override the separators used in `next.jdbc.connection/jdbc-url` for the properties (after the initial JDBC URL portion); by default `?` and `&` are used to build JDBC URLs with properties; for SQL Server drivers (both MS and jTDS) `:property-separator ";"` is used, so this option should only be necessary when you are specifying "unusual" databases that `next.jdbc` does not already know about,
* `:classname` -- an optional string that identifies the name of the JDBC driver class to be used for the connection; for common database types, `next.jdbc` knows the default so this should only be needed for "exotic" database types,
* `:user` -- an optional string that identifies the database username to be used when authenticating (NOTE: HikariCP needs `:username` instead – see below),
* `:password` -- an optional string that identifies the database password to be used when authenticating.
If you already have a JDBC URL, you can either specify that string _instead_ of a "db spec" hash map or, if you need additional properties passed to the JDBC driver, you can use a hash map containing `:jdbcUrl`, specifying the JDBC URL, and any properties you need as additional keys in the hash map.
Any additional keys provided in the "db spec" will be passed to the JDBC driver as `Properties` when each connection is made. Alternatively, when used with `next.jdbc.connection/->pool`, additional keys correspond to setters called on the pooled connection object.
If you are using HikariCP and `next.jdbc.connection/->pool` to create a connection pooled datasource, you need to provide `:username` for the database username (instead of, or as well as, `:user`).
Any path that calls `get-connection` will accept the following options:
* `:auto-commit` -- a `Boolean` that determines whether operations on this connection should be automatically committed (the default, `true`) or not; note that setting `:auto-commit false` is commonly required when you want to stream result set data from a query (along with fetch size etc -- see below),
* `:read-only` -- a `Boolean` that determines whether the operations on this connection should be read-only or not (the default, `false`).
* `:connection` -- a hash map of camelCase properties to set on the `Connection` object after it is created; these correspond to `.set*` methods on the `Connection` class and are set via the Java reflection API (using `org.clojure/java.data`). If `:autoCommit` or `:readOnly` are provided, they will take precedence over the fast, specific options above.
If you need additional options set on a connection, you can either use Java interop to set them directly, or provide them as part of the "db spec" hash map passed to `get-datasource` (although then they will apply to _all_ connections obtained from that datasource).
> Note: If `plan`, `execute!`, or `execute-one!` are passed a `DataSource`, a "db spec" hash map, or a JDBC URL string, they will call `get-connection`, so they will accept the above options in those cases.
## Generating SQL
Except for `query` (which is simply an alias for `execute!`), all the "friendly" SQL functions accept the following options (in addition to all the options that `plan`, `execute!`, and `execute-one!` can accept):
* `:table-fn` -- the quoting function to be used on the string that identifies the table name, if provided; this also applies to assumed table names when `nav`igating schemas,
* `:column-fn` -- the quoting function to be used on any string that identifies a column name, if provided; this also applies to the reducing function context over `plan` and to assumed foreign key column names when `nav`igating schemas.
* `:name-fn` -- may be provided as `next.jdbc.sql.builder/qualified-name` to preserve qualifiers on table and column names; you will need to provide `:table-fn` and/or `:column-fn` as well, in order to quote qualified names properly; new in 1.3.955.
They also support a `:suffix` argument which can be used to specify a SQL string that should be appended to the generated SQL string before executing it, e.g., `:suffix "FOR UPDATE"` or, for an `insert!` call `:suffix "RETURNING *"`.
The latter is particularly useful for databases, such as SQLite these days,
which do not support calling `.getGeneratedKeys()` on `PreparedStatement` objects,
so you cannot use `:return-generated-keys` to get back the keys -- you must
use `RETURNING *`.
In addition, `find-by-keys` accepts the following options (see its docstring for more details):
* `:columns` -- specify one or more columns to `SELECT` to override selecting all columns,
* `:order-by` -- specify one or more columns, on which to sort the results,
* `:top` / `:limit` / `:offset` / `:fetch` to support pagination of results.
In the simple case, the `:columns` option expects a vector of keywords and each will be processed according to `:column-fn`, if provided. A column alias can be specified using a vector pair of keywords and both will be processed according to `:column-fn`, e.g., `[:foo [:bar :quux]]` would expand to `foo, bar AS quux`. You can also specify the first element of the pair as a string which will be used as-is in the generated SQL, e.g., `[:foo ["COUNT(*)" :total]]` would expand to `foo, COUNT(*) AS total`. In the latter case, the alias keyword will still be processed according to `:column-fn` but the string will be untouched -- you are responsible for any quoting and/or other formatting that might be required to produce a valid SQL expression.
> Note: `get-by-id` accepts the same options as `find-by-keys` but it will only ever produce one row, as a hash map, so sort order and pagination are less applicable, although `:columns` may be useful.
As of 1.3.925, `aggregate-by-keys` exists as a wrapper around `find-by-keys`
that accepts the same options as `find-by-keys` except that `:columns` may not
be specified (since it is used to add the aggregate to the query).
## Generating Rows and Result Sets
Any function that might realize a row or a result set will accept:
* `:builder-fn` -- a function that implements the `RowBuilder` and `ResultSetBuilder` protocols; strictly speaking, `plan` and `execute-one!` only need `RowBuilder` to be implemented (and `plan` only needs that if it actually has to realize a row) but most generation functions will implement both for ease of use.
* `:label-fn` -- if `:builder-fn` is specified as one of `next.jdbc.result-set`'s `as-modified-*` builders, this option must be present and should specify a string-to-string transformation that will be applied to the column label for each returned column name.
* `:qualifier-fn` -- if `:builder-fn` is specified as one of `next.jdbc.result-set`'s `as-modified-*` builders, this option should specify a string-to-string transformation that will be applied to the table name for each returned column name. It will be called with an empty string if the table name is not available. It can be omitted for the `as-unqualified-modified-*` variants.
* `:column-fn` -- if present, applied to each column name before looking up the column in the `ResultSet` to get that column's value.
* `:name-fn` -- may be provided as `next.jdbc.sql.builder/qualified-name` to preserve qualifiers on keyword used as column names; by default, a keyword like `:foo/bar` is treated as `"bar"` when looking up columns in a `ResultSet`; `:name-fn` allows you to refer to column names that contain `/`, which some databases allow; if both `:name-fn` and `:column-fn` are provided, `:name-fn` is applied first to the keyword (to produce a string) and then `:column-fn` is applied to that; new in 1.3.955.
In addition, `execute!` accepts the `:multi-rs true` option to return multiple result sets -- as a vector of result sets.
> Note: Subject to the caveats above about `:builder-fn`, that means that `plan`, `execute!`, `execute-one!`, and the "friendly" SQL functions will all accept these options for generating rows and result sets.
## Datafying & Navigating Rows and Result Sets
Any function that produces a result set will accept the following options
that modify the behavior of `datafy` and `nav` applied to the rows in that
result set:
* `:schema` -- override the conventions for identifying foreign keys and the related (primary) keys in the tables to which they refer, on a per table/column basis; can also be used to indicate a fk relationship is one-to-many or many-to-many rather than one-to-one or one-to-many,
* `:schema-opts` -- override the default conventions for identifying foreign keys and the related (primary) keys in the tables to which they refer, as a whole.
See [`datafy`, `nav`, and `:schema`](/doc/datafy-nav-and-schema.md) for more details.
## Statements & Prepared Statements
Any function that creates a `Statement` or a `PreparedStatement` will accept the following options (see below for additional options for `PreparedStatement`):
* `:concurrency` -- a keyword that specifies the concurrency level: `:read-only`, `:updatable`,
* `:cursors` -- a keyword that specifies whether cursors should be closed or held over a commit: `:close`, `:hold`,
* `:fetch-size` -- an integer that guides the JDBC driver in terms of how many rows to fetch at once; the actual behavior of specifying `:fetch-size` is database-specific: some JDBC drivers use a zero or negative value here to trigger streaming of result sets -- other JDBC drivers require this to be positive for streaming and may require additional options to be set on the connection _as well_,
* `:max-rows` -- an integer that tells the JDBC driver to limit result sets to this many rows,
* `:result-type` -- a keyword that affects how the `ResultSet` can be traversed: `:forward-only`, `:scroll-insensitive`, `:scroll-sensitive`,
* `:timeout` -- an integer that specifies the (query) timeout allowed for SQL operations, in seconds. See [**Handling Timeouts**](/doc/tips-and-tricks.md#handling-timeouts) in **Tips & Tricks** for more details on this and other possible timeout settings.
* `:statement` -- a hash map of camelCase properties to set on the `Statement` or `PreparedStatement` object after it is created; these correspond to `.set*` methods on the `Statement` class (which `PreparedStatement` inherits) and are set via the Java reflection API (using `org.clojure/java.data`). If `:fetchSize`, `:maxRows`, or `:queryTimeout` are provided, they will take precedence over the fast, specific options above.
If you specify either `:concurrency` or `:result-type`, you must specify _both_ of them. If you specify `:cursors`, you must also specify `:result-type` _and_ `:concurrency`.
> Note: For MS SQL Server to return table names (for qualified column names), you must specify `:result-type` with one of the scroll values (and so you must also specify `:concurrency`).
Any function that creates a `PreparedStatement` will additionally accept the following options:
* `:return-keys` -- a truthy value asks that the JDBC driver to return any generated keys created by the operation; it can be `true` or it can be a vector of keywords identifying column names that should be returned.
Not all databases or drivers support all of these options, or all values for any given option. If `:return-keys` is a vector of column names and that is not supported, `next.jdbc` will attempt a generic "return generated keys" option instead. If that is not supported, `next.jdbc` will fall back to a regular SQL operation. If other options are not supported, you may get a `SQLException`.
You may need to use `RETURNING *` on `INSERT` statements instead of using `:return-keys` with some database drivers.
> Note: If `plan`, `execute!`, or `execute-one!` are passed a `DataSource`, a "db spec" hash map, or a JDBC URL string, they will call `prepare` to create a `PreparedStatement`, so they will accept the above options in those cases.
In addition to the above, `next.jdbc/execute-batch!` (which may create a `PreparedStatement` if you pass in a SQL string and either a `Connection` or `DataSource`) accepts an options hash map that can also contain the following:
* `:batch-size` -- an integer that determines how to partition the parameter groups for submitting to the database in batches,
* `:large` -- a Boolean flag that indicates whether the batch will produce large update counts (`long` rather than `int` values),
* `:return-generated-keys` -- a Boolean flag that indicates whether `.getGeneratedKeys` should be called on the `PreparedStatement` after each batch is executed (if `true`, `execute-batch!` will return a vector of hash maps containing generated keys). Some databases do not support this and you need to use `RETURNING *` on `INSERT` statements instead.
## Transactions
The `transact` function and `with-transaction` (`+options`) macro accept the following options:
* `:isolation` -- a keyword that identifies the isolation to be used for this transaction: `:none`, `:read-committed`, `:read-uncommitted`, `:repeatable-read`, or `:serializable`; these represent increasingly strict levels of transaction isolation and may not all be available depending on the database and/or JDBC driver being used,
* `:read-only` -- a `Boolean` that indicates whether the transaction should be read-only or not (the default),
* `:rollback-only` -- a `Boolean` that indicates whether the transaction should commit on success (the default) or rollback.
## Plan Selection
The `next.jdbc.plan/select!` function accepts the following specific option:
* `:into` -- a data structure into which the selected result from a `plan` operation are poured; by default this is `[]`; could be any value that is acceptable as the first argument to `into`, subject to `into` accepting the sequence of values produced by the `plan` reduction.
next-jdbc-1.3.955/doc/cljdoc.edn 0000664 0000000 0000000 00000001476 14700603111 0016250 0 ustar 00root root 0000000 0000000 {:cljdoc.doc/tree [["Readme" {:file "README.md"}]
["Changes" {:file "CHANGELOG.md"}]
["Getting Started" {:file "doc/getting-started.md"}
["Friendly SQL Functions" {:file "doc/friendly-sql-functions.md"}]
["Tips & Tricks" {:file "doc/tips-and-tricks.md"}]
["Result Set Builders" {:file "doc/result-set-builders.md"}]
["Prepared Statements" {:file "doc/prepared-statements.md"}]
["Transactions" {:file "doc/transactions.md"}]]
["All The Options" {:file "doc/all-the-options.md"}]
["datafy, nav, and :schema" {:file "doc/datafy-nav-and-schema.md"}]
["Migration from clojure.java.jdbc" {:file "doc/migration-from-clojure-java-jdbc.md"}]]}
next-jdbc-1.3.955/doc/datafy-nav-and-schema.md 0000664 0000000 0000000 00000030304 14700603111 0020664 0 ustar 00root root 0000000 0000000 # `datafy`, `nav`, and the `:schema` option
Clojure 1.10 introduced a new namespace, [`clojure.datafy`](http://clojure.github.io/clojure/clojure.datafy-api.html), and two new protocols (`Datafiable` and `Navigable`) that allow for generalized, lazy navigation around data structures. Cognitect also released REBL (now Nubank's [Morse](https://github.com/nubank/morse)) -- a graphical, interactive tool for browsing Clojure data structures, based on the new `datafy` and `nav` functions.
Shortly after REBL's release, I added experimental support to `clojure.java.jdbc` for `datafy` and `nav` that supported lazy navigation through result sets into foreign key relationships and connected rows and tables. `next.jdbc` bakes that support into result sets produced by `execute!` and `execute-one!`.
In addition to `datafy` and `nav` support in the result sets, as of version 1.0.462, there is a `next.jdbc.datafy` namespace that can be required to extend these protocols to a number of JDBC object types. See **JDBC Datafication** near the end of this page for more detail of this.
Additional tools that understand `datafy` and `nav` include [Portal](https://github.com/djblue/portal) and [Reveal](https://github.com/vlaaad/reveal).
## The `datafy`/`nav` Lifecycle on Result Sets
Here's how the process works, for result sets produced by `next.jdbc`:
* `execute!` and `execute-one!` produce result sets containing rows that are `Datafiable`,
* Tools like Portal, Reveal, and Morse can call `datafy` on result sets to render them as "pure data" (which they already are, but this makes them also `Navigable`),
* Tools like Portal, Reveal, and Morse allow users to "drill down" into elements of rows in the "pure data" result set, using `nav`,
* If a column in a row represents a foreign key into another table, calling `nav` will fetch the related row(s),
* Those can in turn be `datafy`'d and `nav`'d to continue drilling down through connected data in the database.
In addition to `execute!` and `execute-one!`, you can call `next.jdbc.result-set/datafiable-result-set` on any `ResultSet` object to produce a result set whose rows are `Datafiable`. Inside a reduction over the result of `plan`, you can call `next.jdbc.result-set/datafiable-row` on a row to produce a `Datafiable` row. That will realize the entire row, including generating column names using the row builder specified (or `as-maps` by default).
### Identifying Foreign Keys
By default, `next.jdbc` assumes that a column named `id` or `_id` is a foreign key into a table called `` with a primary key called `id`. As an example, if you have a table `address` which has columns `id` (the primary key), `name`, `email`, etc, and a table `contact` which has various columns including `addressid`, then if you retrieve a result set based on `contact`, call `datafy` on it and then "drill down" into the columns, when `(nav row :contact/addressid v)` is called (where `v` is the value of that column in that row) `next.jdbc`'s implementation of `nav` will fetch a single row from the `address` table, identified by `id` matching `v`.
You can override this default behavior for any column in any table by providing a `:schema` option that is a hash map whose keys are column names (usually the table-qualified keywords that `next.jdbc` produces by default) and whose values are table-qualified keywords, optionally wrapped in vectors, that identity the name of the table to which that column is a foreign key and the name of the key column within that table.
As of 1.3.909, you can also override this behavior via the `:schema-opts`
option. This is a hash map whose keys can be:
* `:fk-suffix` -- a string used instead of `"id"` to identify foreign keys,
* `:pk` -- a string used instead of `"id"` for the primary key column in the target table,
* `:pk-fn` -- a function that takes the table name and the value of `:pk` and returns the name of the primary key column in the target table, instead of just using the value of `:pk` (the default is effectively `(constantly )`).
For `:fk-suffix`, the `_` is still permitted and optional in the column name,
so if you specified `:schema-opts {:fk-suffix "fk"}` then `addressfk` and
`address_fk` would both be treated as foreign keys into the `address` table.
_Note: as of 1.3.939, `-` is permitted in key names (in addition to `_`) so that kebab result set builders work as expected._
The `:pk-fn` can use the table name to determine the primary key column name
for exceptions to the `:pk` value. For example, if you have a table `address`
with a primary key column `address_id` instead of `id`, you could use:
```clojure
:pk-fn (fn [table pk]
(if (= "address" table)
"address_id"
pk))
```
The default behavior in the example above is equivalent to this `:schema` value:
```clojure
(jdbc/execute! ds
["select * from contact where city = ?" "San Francisco"]
;; a one-to-one or many-to-one relationship
{:schema {:contact/addressid :address/id}})
```
or these `:schema-opts` values:
```clojure
(jdbc/execute! ds
["select * from contact where city = ?" "San Francisco"]
;; a one-to-one or many-to-one relationship
{:schema-opts {:fk-suffix "id" :pk "id"
:pk-fn (constantly "id")}})
```
If you had a table to track the valid/bouncing status of email addresses over time, `:deliverability`, where `email` is the non-unique key, you could provide automatic navigation into that using:
```clojure
(jdbc/execute! ds
["select * from contact where city = ?" "San Francisco"]
;; one-to-many or many-to-many
{:schema {:contact/addressid :address/id
:address/email [:deliverability/email]}})
```
Since this relies on a foreign key that does not follow a standard suffix
pattern, there is no comparable `:schema-opts` version. In addition, the
`:schema-opts` approach cannot designate a one-to-many or many-to-many
relationship.
When you indicate a `*-to-many` relationship, by wrapping the foreign table/key in a vector, `next.jdbc`'s implementation of `nav` will fetch a multi-row result set from the target table.
If you use foreign key constraints in your database, you could probably generate this `:schema` data structure automatically from the metadata in your database. Similarly, if you use a library that depends on an entity relationship map (such as [seql](https://github.com/exoscale/seql) or [walkable](https://walkable.gitlab.io/)), then you could probably generate this `:schema` data structure from that entity map.
### Behind The Scenes
Making rows datafiable is implemented by adding metadata to each row with a key of `clojure.core.protocols/datafy` and a function as the value. That function closes over the connectable and options passed in to the `execute!` or `execute-one!` call that produced the result set containing those rows.
When called (`datafy` on a row), it adds metadata to the row with a key of `clojure.core.protocols/nav` and another function as the value. That function also closes over the connectable and options passed in.
When that is called (`nav` on a row, column name, and column value), if a
`:schema` entry exists for that column or it matches the convention described
above (either by default or via `:schema-opts`), then it will fetch row(s)
using `next.jdbc`'s `Executable` functions `-execute-one` or `-execute-all`,
passing in the connectable and options closed over.
The protocol `next.jdbc.result-set/DatafiableRow` has a default implementation of `datafiable-row` for `clojure.lang.IObj` that just adds the metadata to support `datafy`. There is also an implementation baked into the result set handling behind `plan` so that you can call `datafiable-row` directly during reduction and get a fully-realized row that can be `datafy`'d (and then `nav`igated).
In addition, you can call `next.jdbc.result-set/datafiable-result-set` on any `ResultSet` object and get a fully realized, datafiable result set created using any of the result set builders.
## JDBC Datafication
If you require `next.jdbc.datafy`, the `Datafiable` protocol is extended to several JDBC object types, so that calling `datafy` will turn them into hash maps according to Java Bean introspection, similar to `clojure.core/bean` although `next.jdbc` uses `clojure.java.data/from-java-shallow` (from [`org.clojure/java.data`](https://github.com/clojure/java.data)), with some additions as described below.
* `java.sql.Connection` -- datafies as a bean; The `:metaData` property is a `java.sql.DatabaseMetaData`, which is also datafiable.
* `DatabaseMetaData` -- datafies as a bean, with an additional `:all-tables` property (that is a dummy object); six properties are navigable to produce fully-realized datafiable result sets:
* `all-tables` -- produced from `(.getTables this nil nil nil nil)`, this is all the tables and views available from the connection that produced the database metadata,
* `catalogs` -- produced from `(.getCatalogs this)`
* `clientInfoProperties` -- all the client properties that the database driver supports,
* `schemas` -- produced from `(.getSchemas this)`,
* `tableTypes` -- produced from `(.getTableTypes this)`,
* `typeInfo` -- produced from `(.getTypeInfo this)`.
* `ParameterMetaData` -- datafies as a vector of parameter descriptions; each parameter hash map has: `:class` (the name of the parameter class -- JVM), `:mode` (one of `:in`, `:in-out`, or `:out`), `:nullability` (one of: `:null`, `:not-null`, or `:unknown`), `:precision`, `:scale`, `:type` (the name of the parameter type -- SQL), and `:signed` (Boolean).
* `ResultSet` -- datafies as a bean; if the `ResultSet` has an associated `Statement` and that in turn has an associated `Connection` then an additional key of `:rows` is provided which is a datafied result set, from `next.jdbc.result-set/datafiable-result-set` with default options. This is provided as a convenience, purely for datafication of other JDBC data types -- in normal `next.jdbc` usage, result sets are datafied under full user control.
* `ResultSetMetaData` -- datafies as a vector of column descriptions; each column hash map has: `:auto-increment`, `:case-sensitive`, `:catalog`, `:class` (the name of the column class -- JVM), `:currency` (Boolean), `:definitely-writable`, `:display-size`, `:label`, `:name`, `:nullability`, `:precision`, `:read-only`, `:searchable`, `:signed`, `:scale`, `:schema`, `:table`, `:type`, and `:writable`.
* `Statement` -- datafies as a bean.
See the Java documentation for these JDBC types for further details on what all the properties from each of these classes mean and which are `int`, `String`, or some other JDBC object type.
In addition, requiring this namespace will affect how `next.jdbc.result-set/metadata` behaves inside the reducing function applied to the result of `plan`. Without this namespace loaded, that function will return a raw `ResultSetMetaData` object (which must not leak outside the reducing function). With this namespace loaded, that function will, instead, return a Clojure data structure describing the columns in the result set.
### SQLite
For some strange reason, SQLite has implemented their `ResultSetMetaData` as also
being a `ResultSet` which leads to ambiguity when datafying some things when
using SQLite. `next.jdbc` currently assumes that if it is asked to `datafy` a
`ResultSet` and that object is _also_ `ResultSetMetaData`, it will treat it
purely as `ResultSetMetaData`, which produces a vector of column metadata as
described above. However, there are some results in SQLite's JDBC driver that
look like `ResultSetMetaData` but should be treated as plain `ResultSet`
objects (which is what other databases' JDBC drivers return).
An example of this is what happens when you try to `datafy` the result of
calling `DatabaseMetaData.getTables()`: the JDBC documentation says you get
back a `ResultSet` but in SQLite, that is also an instance of `ResultSetMetaData`
and so `next.jdbc.datafy` treats it that way instead of as a plain `ResultSet`.
You can call `next.jdbc.result-set/datafiable-result-set` directly in this
case to get the rows as a hash map (although you won't get the underlying
metadata as a bean).
See issue [#212](https://github.com/seancorfield/next-jdbc/issues/212) for more details.
next-jdbc-1.3.955/doc/friendly-sql-functions.md 0000664 0000000 0000000 00000044772 14700603111 0021271 0 ustar 00root root 0000000 0000000 # Friendly SQL Functions
In [Getting Started](/doc/getting-started.md), we used `execute!` and `execute-one!` for all our SQL operations, except when we were reducing a result set. These functions (and `plan`) all expect a "connectable" and a vector containing a SQL string followed by any parameter values required.
A "connectable" can be a `javax.sql.DataSource`, a `java.sql.Connection`, or something that can produce a datasource (when `get-datasource` is called on it). It can also be a `java.sql.PreparedStatement` but we'll cover that a bit later...
Because string-building isn't always much fun, `next.jdbc.sql` also provides some "friendly" functions for basic CRUD operations:
* `insert!` and `insert-multi!` -- for inserting one or more rows into a table -- "Create",
* `query` -- an alias for `execute!` when using a vector of SQL and parameters -- "Read",
* `update!` -- for updating one or more rows in a table -- "Update",
* `delete!` -- for deleting one or more rows in a table -- "Delete".
as well as these more specific "read" operations:
* `find-by-keys` -- a query on one or more column values, specified as a hash map or `WHERE` clause,
* `get-by-id` -- a query to return a single row, based on a single column value, usually the primary key.
These functions are described in more detail below. They are deliberately simple and intended to cover only the most common, basic SQL operations. The primary API (`plan`, `execute!`, `execute-one!`) is the recommended approach for everything beyond that. If you need more expressiveness, consider one of the following libraries to build SQL/parameter vectors, or run queries:
* [HoneySQL](https://github.com/seancorfield/honeysql) -- a composable DSL for creating SQL/parameter vectors from Clojure data structures
* [seql](https://github.com/exoscale/seql) -- a simplified EQL-inspired query language, built on `next.jdbc` (as of release 0.1.6)
* [SQLingvo](https://github.com/r0man/sqlingvo) -- a composable DSL for creating SQL/parameter vectors
* [Walkable](https://github.com/walkable-server/walkable) -- full EQL query language support for creating SQL/parameter vectors
If you prefer to write your SQL separately from your code, take a look at [HugSQL](https://github.com/layerware/hugsql) -- [HugSQL documentation](https://www.hugsql.org/) -- which has a `next.jdbc` adapter, as of version 0.5.1. See below for a "[quick start](#hugsql-quick-start)" for using HugSQL with `next.jdbc`.
As of 1.3.925, `aggregate-by-keys` exists as a wrapper around `find-by-keys`
that accepts the same options as `find-by-keys` and an aggregate SQL expression
and it returns a single value (the aggregate). `aggregate-by-keys` accepts the
same options as `find-by-keys` except that `:columns` may not be specified
(since it is used to add the aggregate to the query).
## `insert!`
Given a table name (as a keyword) and a hash map of column names and values, this performs a single row insertion into the database:
```clojure
(sql/insert! ds :address {:name "A. Person" :email "albert@person.org"})
;; equivalent to
(jdbc/execute-one! ds ["INSERT INTO address (name,email) VALUES (?,?)"
"A.Person" "albert@person.org"] {:return-keys true})
;; some databases may require this instead
(jdbc/execute-one! ds ["INSERT INTO address (name,email) VALUES (?,?) RETURNING *"
"A.Person" "albert@person.org"])
;; which you can achieve with the :suffix option
(sql/insert! ds :address {:name "A. Person" :email "albert@person.org"}
{:suffix "RETURNING *"})
```
If you have multiple rows (hash maps) to insert and they all have the same
set of keys, you can use `insert-multi!` instead (see below), which will
perform a single multi-row insertion, which will generally be faster.
## `insert-multi!`
Given a table name (as a keyword), a vector of column names, and a vector of
row value vectors, this performs a single multi-row insertion into the database:
```clojure
(sql/insert-multi! ds :address
[:name :email]
[["Stella" "stella@artois.beer"]
["Waldo" "waldo@lagunitas.beer"]
["Aunt Sally" "sour@lagunitas.beer"]])
;; equivalent to
(jdbc/execute! ds ["INSERT INTO address (name,email) VALUES (?,?), (?,?), (?,?)"
"Stella" "stella@artois.beer"
"Waldo" "waldo@lagunitas.beer"
"Aunt Sally" "sour@lagunitas.beer"] {:return-keys true})
```
All the row vectors must be the same length, and must match the number of
columns specified.
Given a table name (as a keyword) and a vector of hash maps, this performs a
single multi-row insertion into the database:
```clojure
(sql/insert-multi! ds :address
[{:name "Stella", :email "stella@artois.beer"}
{:name "Waldo", :email "waldo@lagunitas.beer"}
{:name "Aunt Sally", :email "sour@lagunitas.beer"}])
;; equivalent to
(jdbc/execute! ds ["INSERT INTO address (name,email) VALUES (?,?), (?,?), (?,?)"
"Stella" "stella@artois.beer"
"Waldo" "waldo@lagunitas.beer"
"Aunt Sally" "sour@lagunitas.beer"] {:return-keys true})
```
All the hash maps must have the same set of keys, so that the vector of hash
maps can be converted to a vector of columns names and a vector of row value
vectors, as above, so a single multi-row insertion can be performed.
If you wish to insert multiple hash maps that do not have identical keys, you
need to iterate over `insert!` and insert one row at a time, which will
generally be much slower.
> Note: both of these expand to a single SQL statement with placeholders for every
value being inserted -- for large sets of rows, this may exceed the limits
on SQL string size and/or number of parameters for your JDBC driver or your
database. Several databases have a limit of 1,000 parameter placeholders.
Oracle does not support this form of multi-row insert, requiring a different
syntax altogether.
### Batch Insertion
As of release 1.2.790, you can specify `:batch true` in the options, which
will use `execute-batch!` under the hood, instead of `execute!`, as follows:
```clojure
(sql/insert-multi! ds :address
[:name :email]
[["Stella" "stella@artois.beer"]
["Waldo" "waldo@lagunitas.beer"]
["Aunt Sally" "sour@lagunitas.beer"]]
{:batch true})
;; equivalent to
(jdbc/execute-batch! ds
["INSERT INTO address (name,email) VALUES (?,?)"
["Stella" "stella@artois.beer"]
["Waldo" "waldo@lagunitas.beer"]
["Aunt Sally" "sour@lagunitas.beer"]]
{:return-keys true :return-generated-keys true})
;; and
(sql/insert-multi! ds :address
[:name :email]
[{:name "Stella", :email "stella@artois.beer"}
{:name "Waldo", :email "waldo@lagunitas.beer"}
{:name "Aunt Sally", :email "sour@lagunitas.beer"}]
{:batch true})
;; equivalent to
(jdbc/execute-batch! ds
["INSERT INTO address (name,email) VALUES (?,?)"
["Stella" "stella@artois.beer"]
["Waldo" "waldo@lagunitas.beer"]
["Aunt Sally" "sour@lagunitas.beer"]]
{:return-keys true :return-generated-keys true})
```
> Note: not all databases or drivers support returning generated keys like this -- see [**Batched Parameters**](https://cljdoc.org/d/com.github.seancorfield/next.jdbc/CURRENT/doc/getting-started/prepared-statements#caveats) for caveats and possible database-specific behaviors. You may need `RETURNING *` in your SQL instead.
## `query`
Given a vector of SQL and parameters, execute it:
```clojure
(sql/query ds ["select * from address where name = ?" "Stella"])
;; equivalent to
(jdbc/execute! ds ["SELECT * FROM address WHERE name = ?" "Stella"])
```
Note that the single argument form of `execute!`, taking just a `PreparedStatement`, is not supported by `query`.
## `update!`
Given a table name (as a keyword), a hash map of columns names and values to set, and either a hash map of column names and values to match on or a vector containing a partial `WHERE` clause and parameters, perform an update operation on the database:
```clojure
(sql/update! ds :address {:name "Somebody New"} {:id 2})
;; equivalent to
(sql/update! ds :address {:name "Somebody New"} ["id = ?" 2])
;; equivalent to
(jdbc/execute-one! ds ["UPDATE address SET name = ? WHERE id = ?"
"Somebody New" 2])
```
## `delete!`
Given a table name (as a keyword) and either a hash map of column names and values to match on or a vector containing a partial `WHERE` clause and parameters, perform a delete operation on the database:
```clojure
(sql/delete! ds :address {:id 8})
;; equivalent to
(sql/delete! ds :address ["id = ?" 8])
;; equivalent to
(jdbc/execute-one! ds ["DELETE FROM address WHERE id = ?" 8])
```
## `find-by-keys`
Given a table name (as a keyword) and either a hash map of column names and values to match on or a vector containing a partial `WHERE` clause and parameters, execute a query on the database:
```clojure
(sql/find-by-keys ds :address {:name "Stella" :email "stella@artois.beer"})
;; equivalent to
(sql/find-by-keys ds :address ["name = ? AND email = ?"
"Stella" "stella@artois.beer"])
;; equivalent to
(jdbc/execute! ds ["SELECT * FROM address WHERE name = ? AND email = ?"
"Stella" "stella@artois.beer"])
```
While the hash map approach -- "query by example" -- is great for equality
comparisons, sometimes you need other types of comparisons. For example, you
might want to find all the rows where the email address ends in `.beer`:
```clojure
(sql/find-by-keys ds :address ["email LIKE ?" "%.beer"])
;; equivalent to
(jdbc/execute! ds ["SELECT * FROM address WHERE email LIKE ?" "%.beer"])
```
Or you may want to find all the rows where the name is one of a specific
set of values:
```clojure
(sql/find-by-keys ds :address ["name IN (?,?)" "Stella" "Waldo"])
;; equivalent to
(jdbc/execute! ds ["SELECT * FROM address WHERE name IN (?,?)" "Stella" "Waldo"])
```
The default behavior is to return all the columns in each row. You can specify a subset of columns to return using the `:columns` option. It takes a vector and each element of the vector can be:
* a simple keyword representing the column name (`:column-fn` will be applied, if provided),
* a pair of keywords representing the column name and an alias (`:column-fn` will be applied to both, if provided),
* a pair consisting of a string and a keyword, representing a SQL expression and an alias (`:column-fn` will be applied to the alias, if provided).
```clojure
(sql/find-by-keys ds :address {:name "Stella"} {:columns [[:email :address]]})
;; equivalent to
(jdbc/execute! ds ["SELECT email AS address FROM address WHERE name = ?"
"Stella"])
(sql/find-by-keys ds :address {:name "Stella"} {:columns [["count(*)" :n]]})
;; equivalent to
(jdbc/execute! ds ["SELECT count(*) AS n FROM address WHERE name = ?"
"Stella"])
```
> Note: the SQL string provided for a column is copied exactly as-is into the generated SQL -- you are responsible for ensuring it is legal SQL!
`find-by-keys` supports an `:order-by` option which can specify a vector of column names to sort the results by. Elements may be column names or pairs of a column name and the direction to sort: `:asc` or `:desc`:
```clojure
(sql/find-by-keys ds :address
{:name "Stella" :email "stella@artois.beer"}
{:order-by [[:id :desc]]})
;; equivalent to
(jdbc/execute! ds ["SELECT * FROM address WHERE name = ? AND email = ? ORDER BY id DESC"
"Stella" "stella@artois.beer"])
```
`find-by-keys` also supports basic pagination with `:offset` and `:fetch` options which both accept numeric values and adds `OFFSET ? ROWS FETCH NEXT ? ROWS ONLY` to the generated query. To support MySQL and SQLite, you can specify `:limit` instead of `:fetch` which adds `LIMIT ? OFFSET ?` to the generated query instead.
If you want to match all rows in a table -- perhaps with the pagination options in effect -- you can pass the keyword `:all` instead of either a hash map of column names and values or a vector containing a partial `WHERE` clause and parameters.
```clojure
(sql/find-by-keys ds :address :all {:order-by [:id] :offset 5 :fetch 10})
;; equivalent to
(jdbc/execute! ds ["SELECT * FROM address ORDER BY id OFFSET ? ROWS FETCH NEXT ? ROWS ONLY" 5 10])
```
If no rows match, `find-by-keys` returns `[]`, just like `execute!`.
## `aggregate-by-keys`
Added in 1.3.925, this is a wrapper around `find-by-keys` that makes it easier
to perform aggregate queries:
```clojure
(sql/aggregate-by-keys ds :address "count(*)" {:name "Stella"
:email "stella@artois.beer"})
;; is roughly equivalent to
(-> (sql/find-by-keys ds :address {:name "Stella" :email "stella@artois.beer"}
{:columns [["count(*)" :next_jdbc_aggregate_123]]})
(first)
(get :next_jdbc_aggregate_123))
```
(where `:next_jdbc_aggregate_123` is a unique alias generated by `next.jdbc`,
derived from the aggregate expression string).
> Note: the SQL string provided for the aggregate is copied exactly as-is into the generated SQL -- you are responsible for ensuring it is legal SQL!
## `get-by-id`
Given a table name (as a keyword) and a primary key value, with an optional primary key column name, execute a query on the database:
```clojure
(sql/get-by-id ds :address 2)
;; equivalent to
(sql/get-by-id ds :address 2 {}) ; empty options map
;; equivalent to
(sql/get-by-id ds :address 2 :id {}) ; empty options map
;; equivalent to
(jdbc/execute-one! ds ["SELECT * FROM address WHERE id = ?" 2])
```
Note that in order to override the default primary key column name (of `:id`), you need to specify both the column name and an options hash map.
If no rows match, `get-by-id` returns `nil`, just like `execute-one!`.
## Table & Column Entity Names
By default, `next.jdbc.sql` functions construct SQL strings with the entity names exactly matching the (unqualified) keywords provided. If you are trying to use a table name or column name that is a reserved name in SQL for your database, you will need to tell those functions to quote those names.
The namespace `next.jdbc.quoted` provides five functions that cover the most common types of entity quoting, and a modifier function for quoting dot-separated names (e.g., that include schemas):
* `ansi` -- wraps entity names in double quotes,
* `mysql` -- wraps entity names in back ticks,
* `sql-server` -- wraps entity names in square brackets,
* `oracle` -- an alias for `ansi`,
* `postgres` -- an alias for `ansi`.
* `schema` -- wraps a quoting function to support `dbo.table` style entity names.
These quoting functions can be provided to any of the friendly SQL functions above using the `:table-fn` and `:column-fn` options, in a hash map provided as the (optional) last argument in any call. If you want to provide your own entity naming function, you can do that:
```clojure
(defn snake-case [s] (str/replace s #"-" "_"))
(sql/insert! ds :my-table {:some "data"} {:table-fn snake-case})
```
`next.jdbc` provides `snake-kebab-opts` and `unqualified-snake-kebab-opts` which are hash maps containing `:column-fn` and `:table-fn` that use the `->snake_case` function from the [camel-snake-kebab library](https://github.com/clj-commons/camel-snake-kebab/) which performs a more sophisticated transformation:
```clojure
;; transforms :my-table to my_table as above but will also transform
;; column names; in addition, it will perform the reverse transformation
;; on any results, e.g., turning MySQL's :GENERATED_KEY into :generated-key
(sql/insert! ds :my-table {:some "data"} jdbc/snake-kebab-opts)
```
> Note: The entity naming function is passed a string, the result of calling `name` on the keyword passed in. Also note that the default quoting functions do not handle schema-qualified names, such as `dbo.table_name` -- `sql-server` would produce `[dbo.table_name]` from that. Use the `schema` function to wrap the quoting function if you need that behavior, e.g., `{:table-fn (schema sql-server)}` which would produce `[dbo].[table_name]`.
## HugSQL Quick Start
Here's how to get up and running quickly with `next.jdbc` and HugSQL. For more detail, consult the [HugSQL documentation](https://www.hugsql.org/). Add the following dependencies to your project (in addition to `com.github.seancorfield/next.jdbc` and whichever JDBC drivers you need):
```clojure
com.layerware/hugsql-core {:mvn/version "0.5.3"}
com.layerware/hugsql-adapter-next-jdbc {:mvn/version "0.5.3"}
```
_Check the HugSQL documentation for the latest versions to use!_
Write your SQL in `.sql` files that are on the classpath (somewhere under `src` or `resources`). For our purposes, assume a SQL file `db/example.sql` containing your first set of definitions. In your namespace, add these `require`s:
```clojure
[hugsql.core :as hugsql]
[hugsql.adapter.next-jdbc :as adapter]
[next.jdbc :as jdbc]
```
At program startup you'll need to call these functions (either at the top-level of your namespace or inside your initialization function):
```clojure
;; regular SQL functions
(hugsql/def-db-fns "db/example.sql"
{:adapter (adapter/hugsql-adapter-next-jdbc)})
;; development/advanced usage functions that produce a vector containing
;; SQL and parameters that could be passed to jdbc/execute! etc
(hugsql/def-sqlvec-fns "db/example.sql"
{:adapter (adapter/hugsql-adapter-next-jdbc)})
```
Those calls will add function definitions to that namespace based on what is in the `.sql` files. Now set up your db-spec and datasource as usual with `next.jdbc`:
```clojure
(def db-spec {:dbtype "h2:mem" :dbname "example"}) ; assumes H2 driver in deps.edn
(def ds (jdbc/get-datasource db-spec))
```
Borrowing from Princess Bride examples from the HugSQL documentation, you can now do things like this:
```clojure
(create-characters-table ds)
;;=> [#:next.jdbc{:update-count 0}]
(insert-character ds {:name "Westley", :specialty "love"})
;;=> 1
```
By default, for compatibility with their default adapter (`clojure.java.jdbc`), the `next.jdbc` adapter uses the `next.jdbc.result-set/as-unqualified-lower-maps` builder function. You can specify a different builder function when you pass in the adapter:
```clojure
;; add require next.jdbc.result-set :as rs to your ns
(hugsql/def-db-fns "db/example.sql"
{:adapter (adapter/hugsql-adapter-next-jdbc
{:builder-fn rs/as-maps})})
;; now you'll get qualified as-is hash maps back:
(character-by-id ds {:id 1})
;;=> #:CHARACTERS{:ID 1, :NAME "Westley", :SPECIALTY "love", :CREATED_AT #inst "2019-09-27T18:52:54.413000000-00:00"}
```
next-jdbc-1.3.955/doc/getting-started.md 0000664 0000000 0000000 00000136375 14700603111 0017760 0 ustar 00root root 0000000 0000000 # Getting Started with next.jdbc
The `next.jdbc` library provides a simpler, faster alternative to the [`clojure.java.jdbc`](https://github.com/clojure/java.jdbc) Contrib library and is the next step in the evolution of that library.
It is designed to work with Clojure 1.10 or later, supports `datafy`/`nav`, and by default produces hash maps with automatically qualified keywords, indicating source tables and column names (labels), if your database supports that.
## Installation
**You must be using Clojure 1.10 or later.** 1.12.0 is the most recent stable version of Clojure (as of March 15th, 2024).
You can add `next.jdbc` to your project with either:
```clojure
com.github.seancorfield/next.jdbc {:mvn/version "1.3.955"}
```
for `deps.edn` or:
```clojure
[com.github.seancorfield/next.jdbc "1.3.955"]
```
for `project.clj` or `build.boot`.
**In addition, you will need to add dependencies for the JDBC drivers you wish to use for whatever databases you are using. For example:**
* MySQL: `com.mysql/mysql-connector-j {:mvn/version "8.1.0"}` ([search for latest version](https://search.maven.org/artifact/com.mysql/mysql-connector-j))
* PostgreSQL: `org.postgresql/postgresql {:mvn/version "42.6.0"}` ([search for latest version](https://search.maven.org/artifact/org.postgresql/postgresql))
* Microsoft SQL Server: `com.microsoft.sqlserver/mssql-jdbc {:mvn/version "12.4.1.jre11"}` ([search for latest version](https://search.maven.org/artifact/com.microsoft.sqlserver/mssql-jdbc))
* Sqlite: `org.xerial/sqlite-jdbc {:mvn/version "3.43.0.0"}` ([search for latest version](https://search.maven.org/artifact/org.xerial/sqlite-jdbc))
> Note: these are the versions that `next.jdbc` is tested against but there may be more recent versions and those should generally work too -- click the "search for latest version" link to see all available versions of those drivers on Maven Central. You can see the full list of drivers and versions that `next.jdbc` is tested against in [the project's `deps.edn` file](https://github.com/seancorfield/next-jdbc/blob/develop/deps.edn#L10-L27), but many other JDBC drivers for other databases should also work (e.g., Oracle, Red Shift).
## An Example REPL Session
To start using `next.jdbc`, you need to create a datasource (an instance of `javax.sql.DataSource`). You can use `next.jdbc/get-datasource` with either a "db-spec" -- a hash map describing the database you wish to connect to -- or a JDBC URL string. Or you can construct a datasource from one of the connection pooling libraries out there, such as [HikariCP](https://github.com/brettwooldridge/HikariCP) or [c3p0](https://www.mchange.com/projects/c3p0/) -- see [Connection Pooling](#connection-pooling) below.
For the examples in this documentation, we will use a local H2 database on disk, and we'll use the [Clojure CLI tools](https://clojure.org/guides/deps_and_cli) and `deps.edn`:
```clojure
;; deps.edn
{:deps {org.clojure/clojure {:mvn/version "1.12.0"}
com.github.seancorfield/next.jdbc {:mvn/version "1.3.955"}
com.h2database/h2 {:mvn/version "2.2.224"}}}
```
### Create & Populate a Database
In this REPL session, we'll define an H2 datasource, create a database with a simple table, and then add some data and query it:
```clojure
> clj
Clojure 1.12.0
user=> (require '[next.jdbc :as jdbc])
nil
user=> (def db {:dbtype "h2" :dbname "example"})
#'user/db
user=> (def ds (jdbc/get-datasource db))
#'user/ds
user=> (jdbc/execute! ds ["
create table address (
id int auto_increment primary key,
name varchar(32),
email varchar(255)
)"])
[#:next.jdbc{:update-count 0}]
user=> (jdbc/execute! ds ["
insert into address(name,email)
values('Sean Corfield','sean@corfield.org')"])
[#:next.jdbc{:update-count 1}]
user=> (jdbc/execute! ds ["select * from address"])
[#:ADDRESS{:ID 1, :NAME "Sean Corfield", :EMAIL "sean@corfield.org"}]
user=>
```
### The "db-spec" hash map
We described the database with just `:dbtype` and `:dbname` because it is created as a local file and needs no authentication. For most databases, you would need `:user` and `:password` for authentication, and if the database is running on a remote machine you would need `:host` and possibly `:port` (`next.jdbc` tries to guess the correct port based on the `:dbtype`).
> Note: You can see the full list of `:dbtype` values supported in [next.jdbc/get-datasource](https://cljdoc.org/d/com.github.seancorfield/next.jdbc/CURRENT/api/next.jdbc#get-datasource)'s docstring. If you need this programmatically, you can get it from the [next.jdbc.connection/dbtypes](https://cljdoc.org/d/com.github.seancorfield/next.jdbc/CURRENT/api/next.jdbc.connection#dbtypes) hash map. If those lists differ, the hash map is the definitive list (and I'll need to fix the docstring!). The docstring of that Var explains how to tell `next.jdbc` about additional databases.
The hash map can contain arbitrary keys and values: any keys not specifically
recognized by `next.jdbc` will be passed through to the JDBC driver as part
of the connection string. For example, if you specify `:useSSL false`, then
the connection string will have `&useSSL=false` appended to it.
If you already have a JDBC URL (string), you can use that as-is instead of the db-spec hash map. If you have a JDBC URL and still need additional options passed into the JDBC driver, you can use a hash map with the `:jdbcUrl` key specifying the string and whatever additional options you need.
### `execute!` & `execute-one!`
We used `execute!` to create the `address` table, to insert a new row into it, and to query it. In all three cases, `execute!` returns a vector of hash maps with namespace-qualified keys, representing the result set from the operation, if available.
If the result set contains no rows, `execute!` returns an empty vector `[]`.
When no result set is available, `next.jdbc` returns a "result set" containing the "update count" from the operation (which is usually the number of rows affected; note that `:builder-fn` does not affect this fake "result set"). By default, H2 uses uppercase names and `next.jdbc` returns these as-is.
If you only want a single row back -- the first row of any result set, generated keys, or update counts -- you can use `execute-one!` instead. Continuing the REPL session, we'll insert another address and ask for the generated keys to be returned, and then we'll query for a single row:
```clojure
user=> (jdbc/execute-one! ds ["
insert into address(name,email)
values('Someone Else','some@elsewhere.com')
"] {:return-keys true})
#:ADDRESS{:ID 2}
user=> (jdbc/execute-one! ds ["select * from address where id = ?" 2])
#:ADDRESS{:ID 2, :NAME "Someone Else", :EMAIL "some@elsewhere.com"}
user=>
```
Since we used `execute-one!`, we get just one row back (a hash map). This also shows how you provide parameters to SQL statements -- with `?` in the SQL and then the corresponding parameter values in the vector after the SQL string.
If the result set contains no rows, `execute-one!` returns `nil`.
When no result is available, and `next.jdbc` returns a fake "result set" containing the "update count", `execute-one!` returns just a single hash map with the key `next.jdbc/update-count` and the number of rows updated.
In the same way that you would use `execute-one!` if you only want one row or one update count, compared to `execute!` for multiple rows or a vector containing an update count, you can also ask `execute!` to return multiple result sets -- such as might be returned from a stored procedure call, or a T-SQL script (for SQL Server), or multiple statements (for MySQL) -- instead of just one. If you pass the `:multi-rs true` option to `execute!`, you will get back a vector of results sets, instead of just one result set: a vector of zero or more vectors. The result may well be a mix of vectors containing realized rows and vectors containing update counts, reflecting the results from specific SQL operations in the stored procedure or script.
> Note: In general, you should use `execute-one!` for DDL operations since you will only get back an update count. If you have a SQL statement that you know will only return an update count, `execute-one!` is the right choice. If you have a SQL statement that you know will only return a single row in the result set, you probably want to use `execute-one!`. If you use `execute-one!` for a SQL statement that would return multiple rows in a result set, even though you will only get the first row back (as a hash map), the full result set will still be retrieved from the database -- it does not limit the SQL in any way.
### Options & Result Set Builders
All functions in `next.jdbc` (except `get-datasource`) can accept, as the optional last argument, a hash map containing a [variety of options](/doc/all-the-options.md) that control the behavior of the `next.jdbc` functions.
We saw `:return-keys` provided as an option to the `execute-one!` function above and mentioned the `:builder-fn` option just above that. As noted, the default behavior is to return rows as hash maps with namespace-qualified keywords identifying the column names with the table name as the qualifier. There's a whole chapter on [result set builders](/doc/result-set-builders.md) but here's a quick example showing how to get unqualified, lower case keywords instead:
```clojure
user=> (require '[next.jdbc.result-set :as rs])
nil
user=> (jdbc/execute-one! ds ["
insert into address(name,email)
values('Someone Else','some@elsewhere.com')
"] {:return-keys true :builder-fn rs/as-unqualified-lower-maps})
{:id 3}
user=> (jdbc/execute-one! ds ["select * from address where id = ?" 3]
{:builder-fn rs/as-unqualified-lower-maps})
{:id 3, :name "Someone Else", :email "some@elsewhere.com"}
user=>
```
Relying on the default result set builder -- and table-qualified column names -- is the recommended approach to take, if possible, with a few caveats:
* MS SQL Server produces unqualified column names by default (see [**Tips & Tricks**](/doc/tips-and-tricks.md) for how to get table names back from MS SQL Server),
* Oracle's JDBC driver doesn't support `.getTableName()` so it will only produce unqualified column names (also mentioned in **Tips & Tricks**),
* PostgreSQL's JDBC driver performs an extra SQL query to get the necessary metadata, so there is some overhead to using qualified column names (also mentioned in **Tips & Tricks**),
* If your SQL query joins tables in a way that produces duplicate column names, and you use unqualified column names, then those duplicated column names will conflict and you will get only one of them in your result -- use aliases in SQL (`as`) to make the column names distinct,
* If your SQL query joins a table to itself under different aliases, the _qualified_ column names will conflict because they are based on the underlying table name provided by the JDBC driver rather than the alias you used in your query -- again, use aliases in SQL to make those column names distinct.
If you want to pass the same set of options into several operations, you can use `next.jdbc/with-options` to wrap your datasource (or connection) in a way that will pass "default options". Here's the example above rewritten with that:
```clojure
user=> (require '[next.jdbc.result-set :as rs])
nil
user=> (def ds-opts (jdbc/with-options ds {:builder-fn rs/as-unqualified-lower-maps}))
#'user/ds-opts
user=> (jdbc/execute-one! ds-opts ["
insert into address(name,email)
values('Someone Else','some@elsewhere.com')
"] {:return-keys true})
{:id 4}
user=> (jdbc/execute-one! ds-opts ["select * from address where id = ?" 4])
{:id 4, :name "Someone Else", :email "some@elsewhere.com"}
user=>
```
> Note: See the `next.jdbc/with-options` examples in the [**Datasources, Connections & Transactions**](#datasources-connections--transactions) section below for some caveats around using this function.
In addition, two pre-built option hash maps are available in `next.jdbc`, that leverage the [camel-snake-kebab library](https://github.com/clj-commons/camel-snake-kebab/):
* `snake-kebab-opts` -- provides `:column-fn`, `:table-fn`, `:label-fn`, `:qualifier-fn`, and `:builder-fn` that will convert Clojure identifiers in `:kebab-case` to SQL entities in `snake_case` and will produce result sets with qualified `:kebab-case` names from SQL entities that use `snake_case`,
* `unqualified-snake-kebab-opts` -- provides `:column-fn`, `:table-fn`, `:label-fn`, `:qualifier-fn`, and `:builder-fn` that will convert Clojure identifiers in `:kebab-case` to SQL entities in `snake_case` and will produce result sets with _unqualified_ `:kebab-case` names from SQL entities that use `snake_case`.
You can `assoc` any additional options you need into these pre-built option hash maps
and pass the combined options into any of this library's functions.
> Note: Using `camel-snake-kebab` might also be helpful if your database has `camelCase` table and column names, although you'll have to provide `:column-fn` and `:table-fn` yourself as `->camelCase` from that library. Either way, consider relying on the _default_ result set builder first and avoid converting column and table names (see [Advantages of 'snake case': portability and ubiquity](https://vvvvalvalval.github.io/posts/clojure-key-namespacing-convention-considered-harmful.html#advantages_of_'snake_case':_portability_and_ubiquity) for an interesting discussion on kebab-case vs snake_case -- I do not agree with all of the author's points in that article, particularly his position against qualified keywords, but his argument for retaining snake_case around system boundaries is compelling).
### `plan` & Reducing Result Sets
While the `execute!` and `execute-one!` functions are fine for retrieving result sets as data, most of the time you want to process that data efficiently without necessarily converting the entire result set into a Clojure data structure, so `next.jdbc` provides a SQL execution function that works with `reduce` and with transducers to consume the result set without the intermediate overhead of creating Clojure data structures for every row.
We're going to create a new table that contains invoice items so we can see how to use `plan` without producing data structures:
```clojure
user=> (jdbc/execute-one! ds ["
create table invoice (
id int auto_increment primary key,
product varchar(32),
unit_price decimal(10,2),
unit_count int unsigned,
customer_id int unsigned
)"])
#:next.jdbc{:update-count 0}
user=> (jdbc/execute-one! ds ["
insert into invoice (product, unit_price, unit_count, customer_id)
values ('apple', 0.99, 6, 100),
('banana', 1.25, 3, 100),
('cucumber', 2.49, 2, 100)
"])
#:next.jdbc{:update-count 3}
user=> (reduce
(fn [cost row]
(+ cost (* (:unit_price row)
(:unit_count row))))
0
(jdbc/plan ds ["select * from invoice where customer_id = ?" 100]))
14.67M
```
The call to `jdbc/plan` returns an `IReduceInit` object (a "reducible collection" that requires an initial value) but does not actually run the SQL.
Only when the returned object is reduced is the connection obtained from the data source, the SQL executed, and the computation performed. The connection is closed automatically when the reduction is complete. The `row` in the reduction is an abstraction over the underlying (mutable) `ResultSet` object -- it is not a Clojure data structure. Because of that, you can simply access the columns via their SQL labels as shown -- you do not need to use the column-qualified name, and you do not need to worry about the database returning uppercase column names (SQL labels are not case sensitive).
> Note: if you want a column name transformation to be applied here, specify `:column-fn` as an option to the `plan` call.
Here's the same computation rewritten using `transduce`:
```clojure
user=> (transduce
(map #(* (:unit_price %) (:unit_count %)))
+
0
(jdbc/plan ds ["select * from invoice where customer_id = ?" 100]))
14.67M
```
or composing the transforms:
```clojure
user=> (transduce
(comp (map (juxt :unit_price :unit_count))
(map #(apply * %)))
+
0
(jdbc/plan ds ["select * from invoice where customer_id = ?" 100]))
14.67M
```
If you just wanted the total item count:
```clojure
user=> (transduce
(map :unit_count)
+
0
(jdbc/plan ds ["select * from invoice where customer_id = ?" 100]))
11
```
You can use other functions that perform reductions to process the result of `plan`, such as obtaining a set of unique products from an invoice:
```clojure
user=> (into #{}
(map :product)
(jdbc/plan ds ["select * from invoice where customer_id = ?" 100]))
#{"apple" "banana" "cucumber"}
```
If you want to process the rows purely for side-effects, without a result, you
can use `run!`:
```clojure
user=> (run! #(println (:product %))
(jdbc/plan ds ["select * from invoice where customer_id = ?" 100]))
apple
banana
cucumber
nil
```
Any operation that can perform key-based lookup can be used here without creating hash maps from the rows: `get`, `contains?`, `find` (returns a `MapEntry` of whatever key you requested and the corresponding column value), or direct keyword access as shown above. Any operation that would require a Clojure hash map, such as `assoc` or anything that invokes `seq` (`keys`, `vals`), will cause the full row to be expanded into a hash map, such as produced by `execute!` or `execute-one!`, which implements `Datafiable` and `Navigable` and supports lazy navigation via foreign keys, explained in [`datafy`, `nav`, and the `:schema` option](/doc/datafy-nav-and-schema.md).
This means that `select-keys` can be used to create regular Clojure hash map from (a subset of) columns in the row, without realizing the row, and it will not implement `Datafiable` or `Navigable`.
If you wish to create a Clojure hash map that supports that lazy navigation, you can call `next.jdbc.result-set/datafiable-row`, passing in the current row, a `connectable`, and an options hash map, just as you passed into `plan`. Compare the difference in output between these four expressions (see below for a simpler way to do this):
```clojure
;; selects specific keys (as simple keywords):
user=> (into []
(map #(select-keys % [:id :product :unit_price :unit_count :customer_id]))
(jdbc/plan ds ["select * from invoice where customer_id = ?" 100]))
;; selects specific keys (as qualified keywords):
user=> (into []
(map #(select-keys % [:invoice/id :invoice/product
:invoice/unit_price :invoice/unit_count
:invoice/customer_id]))
(jdbc/plan ds ["select * from invoice where customer_id = ?" 100]))
;; selects specific keys (as qualified keywords -- ignoring the table name):
user=> (into []
(map #(select-keys % [:foo/id :bar/product
:quux/unit_price :wibble/unit_count
:blah/customer_id]))
(jdbc/plan ds ["select * from invoice where customer_id = ?" 100]))
;; do not do this:
user=> (into []
(map #(into {} %))
(jdbc/plan ds ["select * from invoice where customer_id = ?" 100]))
;; do this if you just want realized rows with default qualified names:
user=> (into []
(map #(rs/datafiable-row % ds {}))
(jdbc/plan ds ["select * from invoice where customer_id = ?" 100]))
```
The latter produces a vector of hash maps, just like the result of `execute!`, where each "row" follows the case conventions of the database, the keys are qualified by the table name, and the hash map is datafiable and navigable. The third expression produces a result that looks identical but has stripped all the metadata away: it has still called `rs/datafiable-row` to fully-realize a datafiable and navigable hash map but it has then "poured" that into a new, empty hash map, losing the metadata.
In addition to the hash map operations described above, the abstraction over the `ResultSet` can also respond to a couple of functions in `next.jdbc.result-set`:
* `next.jdbc.result-set/row-number` - returns the 1-based row number, by calling `.getRow()` on the `ResultSet`,
* `next.jdbc.result-set/column-names` - returns a vector of column names from the `ResultSet`, as created by the result set builder specified,
* `next.jdbc.result-set/metadata` - returns the `ResultSetMetaData` object, datafied (so the result will depend on whether you have required `next.jdbc.datafy`).
> Note: Apache Derby requires the following options to be provided in order to call `.getRow()` (and therefore `row-number`): `{:concurrency :read-only, :cursors :close, :result-type :scroll-insensitive}`
If you realize a row, by calling `datafiable-row` on the abstract row passed into the reducing function, you can still call `row-number` and `column-names` on that realized row. These functions are _not_ available on the realized rows returned from `execute!` or `execute-one!`, only within reductions over `plan`.
The order of the column names returned by `column-names` matches SQL's natural order, based on the operation performed, and will also match the order of column values provided in the reduction when using an array-based result set builder (`plan` provides just the column values, one row at a time, when using an array-based builder, without the leading vector of column names that you would get from `execute!`: if you call `datafiable-row` on such a row, you will get a realized vector of column values).
> Note: since `plan` expects you to process the result set via reduction, you should not use it for DDL or for SQL statements that only produce update counts.
As of 1.1.588, two helper functions are available to make some `plan` operations easier:
* `next.jdbc.plan/select-one!` -- reduces over `plan` and returns part of just the first row,
* `next.jdbc.plan/select!` -- reduces over `plan` and returns a sequence of parts of each row.
> Note: in both those cases, an appropriate initial value is supplied to the `reduce` (since `plan` returns an `IReduceInit` object).
`select!` accepts a vector of column names to extract or a function to apply to each row. It is equivalent to the following:
```clojure
;; select! with vector of column names:
user=> (into [] (map #(select-keys % cols)) (jdbc/plan ...))
;; select! with a function:
user=> (into [] (map f) (jdbc/plan ...))
```
The `:into` option lets you override the default of `[]` as the first argument to `into`.
`select-one!` performs the same transformation on just the first row returned from a reduction over `plan`, equivalent to the following:
```clojure
;; select-one! with vector of column names:
user=> (reduce (fn [_ row] (reduced (select-keys row cols))) nil (jdbc/plan ...))
;; select-one! with a function:
user=> (reduce (fn [_ row] (reduced (f row))) nil (jdbc/plan ...))
```
For example:
```clojure
;; select columns:
user=> (plan/select-one!
ds [:n] ["select count(*) as n from invoice where customer_id = ?" 100])
{:n 3}
;; apply a function:
user=> (plan/select-one!
ds :n ["select count(*) as n from invoice where customer_id = ?" 100])
3
```
Here are some of the above sequence-producing operations, showing their `select!` equivalent:
```clojure
user=> (require '[next.jdbc.plan :as plan])
nil
user=> (into #{}
(map :product)
(jdbc/plan ds ["select * from invoice where customer_id = ?" 100]))
#{"apple" "banana" "cucumber"}
;; or:
user=> (plan/select! ds
:product
["select * from invoice where customer_id = ?" 100]
{:into #{}}) ; produce a set, rather than a vector
#{"apple" "banana" "cucumber"}
;; selects specific keys (as simple keywords):
user=> (into []
(map #(select-keys % [:id :product :unit_price :unit_count :customer_id]))
(jdbc/plan ds ["select * from invoice where customer_id = ?" 100]))
;; or:
user=> (plan/select! ds
[:id :product :unit_price :unit_count :customer_id]
["select * from invoice where customer_id = ?" 100])
;; selects specific keys (as qualified keywords):
user=> (into []
(map #(select-keys % [:invoice/id :invoice/product
:invoice/unit_price :invoice/unit_count
:invoice/customer_id]))
(jdbc/plan ds ["select * from invoice where customer_id = ?" 100]))
;; or:
user=> (plan/select! ds
[:invoice/id :invoice/product
:invoice/unit_price :invoice/unit_count
:invoice/customer_id]
["select * from invoice where customer_id = ?" 100])
;; selects specific keys (as qualified keywords -- ignoring the table name):
user=> (into []
(map #(select-keys % [:foo/id :bar/product
:quux/unit_price :wibble/unit_count
:blah/customer_id]))
(jdbc/plan ds ["select * from invoice where customer_id = ?" 100]))
;; or:
user=> (plan/select! ds
[:foo/id :bar/product
:quux/unit_price :wibble/unit_count
:blah/customer_id]
["select * from invoice where customer_id = ?" 100])
```
> Note: you need to be careful when using stateful transducers, such as `partition-by`, when reducing over the result of `plan`. Since `plan` returns an `IReduceInit`, the resource management (around the `ResultSet`) only applies to the `reduce` operation: many stateful transducers have a completing function that will access elements of the result sequence -- and this will usually fail after the reduction has cleaned up the resources. This is an inherent problem with stateful transducers over resource-managing reductions with no good solution.
## Datasources, Connections & Transactions
In the examples above, we created a datasource and then passed it into each function call. When `next.jdbc` is given a datasource, it creates a `java.sql.Connection` from it, uses it for the SQL operation (by creating and populating a `java.sql.PreparedStatement` from the connection and the SQL string and parameters passed in), and then closes it. If you're not using a connection pooling datasource (see below), that can be quite an overhead: setting up database connections to remote servers is not cheap!
If you want to run multiple SQL operations without that overhead each time, you can create the connection yourself and reuse it across several operations using `with-open` and `next.jdbc/get-connection`:
```clojure
(with-open [con (jdbc/get-connection ds)]
(jdbc/execute! con ...)
(jdbc/execute! con ...)
(into [] (map :column) (jdbc/plan con ...)))
```
If any of these operations throws an exception, the connection will still be closed but operations prior to the exception will have already been committed to the database. If you want to reuse a connection across multiple operations but have them all rollback if an exception occurs, you can use `next.jdbc/with-transaction`:
```clojure
(jdbc/with-transaction [tx ds]
(jdbc/execute! tx ...)
(jdbc/execute! tx ...)
(into [] (map :column) (jdbc/plan tx ...)))
```
`with-transaction` behaves somewhat like Clojure's `with-open` macro: it will (generally) create a new `Connection` for you (from `ds`) and set up a transaction on it and bind it to `tx`; if the code in the body executes successfully, it will commit the transaction and close the `Connection`; if the code in the body throws an exception, it will rollback the transaction, but still close the `Connection`.
If `ds` is a `Connection`, `with-transaction` will just bind `tx` to that but will set up a transaction on that `Connection`; run the code in the body and either commit or rollback the transaction; it will leave the `Connection` open (but try to restore the state of the `Connection`).
If `ds` is a datasource, `with-transaction` will call `get-connection` on it, bind `tx` to that `Connection` and set up a transaction; run the code in the body and either commit or rollback the transaction; close the `Connection`.
If `ds` is something else, `with-transaction` will call `get-datasource` on it first and then proceed as above.
Here's what will happen in the case where `with-transaction` is given a `Connection`:
```clojure
(with-open [con (jdbc/get-connection ds)]
(jdbc/execute! con ...) ; auto-committed
(jdbc/with-transaction [tx con] ; will commit or rollback this group:
;; note: tx is bound to the same Connection object as con
(jdbc/execute! tx ...)
(jdbc/execute! tx ...)
(into [] (map :column) (jdbc/plan tx ...)))
(jdbc/execute! con ...)) ; auto-committed
```
You can read more about [working with transactions](/doc/transactions.md) further on in the documentation.
> Note: Because `get-datasource` and `get-connection` return plain JDBC objects (`javax.sql.DataSource` and `java.sql.Connection` respectively), `next.jdbc/with-options` and `next.jdbc/with-logging` (see **Logging** below) cannot flow options across those calls, so if you are explicitly managing connections or transactions as above, you would need to have local bindings for the wrapped versions:
```clojure
(with-open [con (jdbc/get-connection ds)]
(let [con-opts (jdbc/with-options con some-options)]
(jdbc/execute! con-opts ...) ; auto-committed
(jdbc/with-transaction [tx con-opts] ; will commit or rollback this group:
(let [tx-opts (jdbc/with-options tx (:options con-opts))]
(jdbc/execute! tx-opts ...)
(jdbc/execute! tx-opts ...)
(into [] (map :column) (jdbc/plan tx-opts ...))))
(jdbc/execute! con-opts ...))) ; auto-committed
```
As of 1.3.894, you can use `next.jdbc/with-transaction+options` instead,
which will automatically rewrap the `Connection` with the options from the
initial transactable. Be aware that means you cannot use Java interop on the
new connectable because it is no longer a plain Java `java.sql.Connection` object.
### Prepared Statement Caveat
Not all databases support using a `PreparedStatement` for every type of SQL operation. You might have to create a `java.sql.Statement` instead, directly from a `java.sql.Connection` and use that, without parameters, in `plan`, `execute!`, or `execute-one!`. See the following example:
```clojure
(require '[next.jdbc.prepare :as prep])
(with-open [con (jdbc/get-connection ds)]
(jdbc/execute! (prep/statement con) ["...just a SQL string..."])
(jdbc/execute! con ["...some SQL..." "and" "parameters"]) ; uses PreparedStatement
(into [] (map :column) (jdbc/plan (prep/statement con) ["..."])))
```
## Connection Pooling
`next.jdbc` makes it easy to use either HikariCP or c3p0 for connection pooling.
First, you need to add the connection pooling library as a dependency, e.g.,
```clojure
com.zaxxer/HikariCP {:mvn/version "5.0.1"}
;; or:
com.mchange/c3p0 {:mvn/version "0.9.5.5"}
```
_Check those libraries' documentation for the latest version to use!_
Then import the appropriate classes into your code:
```clojure
(ns my.main
(:require [next.jdbc :as jdbc]
[next.jdbc.connection :as connection])
(:import (com.zaxxer.hikari HikariDataSource)
;; or:
(com.mchange.v2.c3p0 ComboPooledDataSource PooledDataSource)))
```
Finally, create the connection pooled datasource. `db-spec` here contains the regular `next.jdbc` options (`:dbtype`, `:dbname`, and maybe `:host`, `:port`, `:classname` etc -- or the `:jdbcUrl` format mentioned above). Those are used to construct the JDBC URL that is passed into the datasource object (by calling `.setJdbcUrl` on it). You can also specify any of the connection pooling library's options, as mixed case keywords corresponding to any simple setter methods on the class being passed in, e.g., `:connectionTestQuery`, `:maximumPoolSize` (HikariCP), `:maxPoolSize`, `:preferredTestQuery` (c3p0).
In addition, for HikariCP, you can specify properties to be applied to the underlying `DataSource` itself by passing `:dataSourceProperties` with a hash map containing those properties, such as `:socketTimeout`:
```clojure
;; assumes next.jdbc.connection has been required as connection
(connection/->pool com.zaxxer.hikari.HikariDataSource
{:dbtype "postgres" :dbname "thedb" :username "dbuser" :password "secret"
:dataSourceProperties {:socketTimeout 30}})
```
_(under the hood, `java.data` converts that hash map to a `java.util.Properties` object with `String` keys and `String` values)_
If you need to pass in extra connection URL parameters, it can be easier to use
`next.jdbc.connection/jdbc-url` to construct the URL, e.g.,
```clojure
(connection/->pool com.zaxxer.hikari.HikariDataSource
{:jdbcUrl
(connection/jdbc-url {:dbtype "mysql" :dbname "thedb" :useSSL false})
:username "dbuser" :password "secret"})
```
Here we pass `:useSSL false` to `jdbc-url` so that it ends up in the
connection string, but pass `:username` and `:password` for the pool itself.
> Note: both HikariCP and c3p0 defer validation of the settings until a connection is requested. If you want to ensure that your datasource is set up correctly, and the database is reachable, when you first create the connection pool, you will need to call `jdbc/get-connection` on it (and then close that connection and return it to the pool). This will also ensure that the pool is fully initialized. See the examples below.
Some important notes regarding HikariCP:
* Authentication credentials must use `:username` (if you are using c3p0 or regular, non-pooled, connections, then the db-spec hash map must contain `:user`).
* When using `:dbtype "jtds"`, you must specify `:connectionTestQuery "SELECT 1"` (or some other query to verify the health of a connection) because the jTDS JDBC driver does not implement `.isValid()` so HikariCP requires a specific test query instead (c3p0 does not rely on this method so it works with jTDS without needing `:preferredTestQuery`).
* When using PostgreSQL, and trying to set a default `:schema` via HikariCP, you will need to specify `:connectionInitSql "COMMIT;"` until [this HikariCP issue](https://github.com/brettwooldridge/HikariCP/issues/1369) is addressed.
You will generally want to create the connection pooled datasource at the start of your program (and close it before you exit, although that's not really important since it'll be cleaned up when the JVM shuts down):
```clojure
(defn -main [& args]
;; db-spec must include :username
(with-open [^HikariDataSource ds (connection/->pool HikariDataSource db-spec)]
;; this code initializes the pool and performs a validation check:
(.close (jdbc/get-connection ds))
;; otherwise that validation check is deferred until the first connection
;; is requested in a regular operation:
(jdbc/execute! ds ...)
(jdbc/execute! ds ...)
(do-other-stuff ds args)
(into [] (map :column) (jdbc/plan ds ...))))
;; or:
(defn -main [& args]
(with-open [^PooledDataSource ds (connection/->pool ComboPooledDataSource db-spec)]
;; this code initializes the pool and performs a validation check:
(.close (jdbc/get-connection ds))
;; otherwise that validation check is deferred until the first connection
;; is requested in a regular operation:
(jdbc/execute! ds ...)
(jdbc/execute! ds ...)
(do-other-stuff ds args)
(into [] (map :column) (jdbc/plan ds ...))))
```
You only need the type hints on `ds` if you plan to call methods on it via Java interop, such as `.close` (or using `with-open` to auto-close it) and you want to avoid reflection.
If you are using [Component](https://github.com/stuartsierra/component), a connection pooled datasource is a good candidate since it has a `start`/`stop` lifecycle. `next.jdbc` has support for Component built-in, via the `next.jdbc.connection/component` function which creates a Component-compatible entity which you can `start` and then invoke as a function with no arguments to obtain the `DataSource` within.
```clojure
(ns my.data.program
(:require [com.stuartsierra.component :as component]
[next.jdbc :as jdbc]
[next.jdbc.connection :as connection])
(:import (com.zaxxer.hikari HikariDataSource)))
;; HikariCP requires :username instead of :user in the db-spec:
(def ^:private db-spec {:dbtype "..." :dbname "..." :username "..." :password "..."})
(defn -main [& args]
;; connection/component takes the same arguments as connection/->pool:
(let [ds (component/start (connection/component HikariDataSource db-spec))]
(try
;; "invoke" the data source component to get the javax.sql.DataSource:
(jdbc/execute! (ds) ...)
(jdbc/execute! (ds) ...)
;; can pass the data source component around other code:
(do-other-stuff ds args)
(into [] (map :column) (jdbc/plan (ds) ...))
(finally
;; stopping the component will close the connection pool:
(component/stop ds)))))
```
If you want to either modify the connection pooled datasource after it is
created, or want to perform some database initialization, you can pass a
function as `:init-fn` in the `db-spec` hash map. The `component` function
will arrange for that initialization function to be invoked on the newly-created
datasource whenever `start` is called on the Component returned.
## Working with Additional Data Types
By default, `next.jdbc` relies on the JDBC driver to handle all data type conversions when reading from a result set (to produce Clojure values from SQL values) or setting parameters (to produce SQL values from Clojure values). Sometimes that means that you will get back a database-specific Java object that would need to be manually converted to a Clojure data structure, or that certain database column types require you to manually construct the appropriate database-specific Java object to pass into a SQL operation. You can usually automate those conversions using either the [`ReadableColumn` protocol](/doc/result-set-builders.md#readablecolumn) (for converting database-specific types to Clojure values) or the [`SettableParameter` protocol](/doc/prepared-statements.md#prepared-statement-parameters) (for converting Clojure values to database-specific types).
In particular, PostgreSQL does not seem to perform a conversion from `java.util.Date` to a SQL data type automatically. You can `require` the [`next.jdbc.date-time` namespace](https://cljdoc.org/d/com.github.seancorfield/next.jdbc/CURRENT/api/next.jdbc.date-time) to enable that conversion.
If you are working with Java Time, some JDBC drivers will automatically convert `java.time.Instant` (and `java.time.LocalDate` and `java.time.LocalDateTime`) to a SQL data type automatically, but others will not. Requiring `next.jdbc.date-time` will enable those automatic conversions for all databases.
> Note: `next.jdbc.date-time` also provides functions you can call to enable automatic conversion of SQL date/timestamp types to Clojure data types when reading result sets. If you need specific conversions beyond that to happen automatically, consider extending the `ReadableColumn` protocol, mentioned above.
The `next.jdbc.types` namespace provides over three dozen convenience functions for "type hinting" values so that the JDBC driver might automatically handle some conversions that the default parameter setting function does not. Each function is named for the corresponding SQL type, prefixed by `as-`: `as-bigint`, `as-other`, `as-real`, etc. An example of where this helps is when dealing with PostgreSQL enumerated types: the default behavior, when passed a string that should correspond to an enumerated type, is to throw an exception that `column "..." is of type ... but expression is of type character varying`. You can wrap such strings with `(as-other "...")` which tells PostgreSQL to treat this as `java.sql.Types/OTHER` when setting the parameter.
## Processing Database Metadata
JDBC provides several features that let you introspect the database to obtain lists of tables, views, and so on. `next.jdbc` does not provide any specific functions for this but you can easily get this metadata from a `java.sql.Connection` and turn it into Clojure data as follows:
```clojure
(with-open [con (p/get-connection ds opts)]
(-> (.getMetaData con) ; produces java.sql.DatabaseMetaData
;; return a java.sql.ResultSet describing all tables and views:
(.getTables nil nil nil (into-array ["TABLE" "VIEW"]))
(rs/datafiable-result-set ds opts)))
```
Several methods on `DatabaseMetaData` return a `ResultSet` object, e.g., `.getCatalogs()`, `.getClientInfoProperties()`, `.getSchemas()`.
All of those can be handled in a similar manner to the above. See the [Oracle documentation for `java.sql.DatabaseMetaData`](https://docs.oracle.com/en/java/javase/11/docs/api/java.sql/java/sql/DatabaseMetaData.html) (Java 11) for more details.
If you are working with a generalized datasource that may be a `Connection`, a `DataSource`,
or a wrapped connectable (via something like `with-options` or `with-transaction`), you can
write generic, `Connection`-based code using `on-connection` which will reuse a `Connection`
if one is passed or create a new one if needed (and automatically close it afterward):
```clojure
(on-connection [con ds]
(-> (.getMetaData con) ; produces java.sql.DatabaseMetaData
;; return a java.sql.ResultSet describing all tables and views:
(.getTables nil nil nil (into-array ["TABLE" "VIEW"]))
(rs/datafiable-result-set ds opts)))
```
> Note: to avoid confusion and/or incorrect usage, you cannot pass options to `on-connection` because they would be ignored in some cases (existing `Connection` or a wrapped `Connection`).
As of 1.3.894, if you want the options from a wrapped connectable to flow
through to the new connectable inside `on-connection`, you can use the
`on-connection+options` variant of the macro. This will automatically rewrap
the connectable produced with the options from the initial connectable.
Be aware that means you cannot
use plain Java interop inside the body of the macro because the connectable
is no longer a plain Java `java.sql.Connection` object.
## Logging
Sometimes it is convenient to have database operations logged automatically. `next.jdbc/with-logging`
provides a way to wrap a datasource (or connection) so that operations on it will be logged via
functions you provide.
There are two logging points:
* Logging the SQL and parameters prior to a database operation,
* Logging the result of a database operation.
`next.jdbc/with-logging` accepts two or three arguments and returns a connectable that can
be used with `plan`, `execute!`, `execute-one!`, `prepare`, or any of the "friendly SQL
functions". Since it uses a similar wrapping mechanism to `next.jdbc/with-options`, the
same caveats apply -- see [**Datasources, Connections & Transactions**](#datasources-connections--transactions) above for details.
### Logging SQL and Parameters
```clojure
(let [log-ds (jdbc/with-logging ds my-sql-logger)]
(jdbc/execute! log-ds ["some SQL" "and" "params"])
...
(jdbc/execute! log-ds ["more SQL" "other" "params"]))
```
The `my-sql-logger` function will be invoked for each database operation, with two arguments:
* The fully-qualified symbol identifying the operation,
* The vector containing the SQL string followed by the parameters.
The symbol will be one of: `next.jdbc/plan`, `next.jdbc/execute!`, `next.jdbc/execute-one!`,
or `next.jdbc/prepare`. The friendly SQL functions invoke `execute!` or `execute-one!` under
the hood, so that is how they will be logged.
The logging function can do anything it wants with the SQL and parameters. If you are logging
parameter values, consider sensitive data that you might be passing in.
### Logging Results
```clojure
(let [log-ds (jdbc/with-logging ds my-sql-logger my-result-logger)]
(jdbc/execute! log-ds ["some SQL" "and" "params"])
...
(jdbc/execute! log-ds ["more SQL" "other" "params"]))
```
In addition to calling `my-sql-logger` as described above, this will also call `my-result-logger`
for `execute!` and `execute-one!` operations (`plan` and `prepare` do not execute database
operations directly so they do not produce results). `my-result-logger` will be invoked with
three arguments:
* The fully-qualified symbol identifying the operation,
* A "state" argument (the result of calling `my-sql-logger`),
* The result set data structure, if the call succeeded, or the exception if it failed.
The return value of the result logger function is ignored.
The symbol will be one of: `next.jdbc/execute!` or `next.jdbc/execute-one!`. The friendly
SQL functions invoke `execute!` or `execute-one!` under the hood, so that is how they will
be logged.
The "state" argument allows you to return data from the first logging function, such as the
current time, that can be consumed by the second logging function, so that you can calculate
how long an `execute!` or `execute-one!` operation took. If the first logging function
returns `nil`, that will be passed as the second argument to your second logging function.
The result set data structure could be arbitrarily large. It will generally be a vector
for calls to `execute!` or a hash map for calls to `execute-one!`, but its shape is determined
by any `:builder-fn` options in effect. You should check if `(instance? Throwable result)`
to see if the call failed and the logger has been called with the thrown exception.
For `plan` and `prepare` calls, only the first logging function is invoked (and the return
value is ignored). You can use the symbol passed in to determine this.
### Naive Logging with Timing
This example prints all SQL and parameters to `*out*` along with millisecond timing and
results, if a result set is available:
```clojure
dev=> (def lds (jdbc/with-logging ds
#_=> (fn [sym sql-params]
#_=> (prn sym sql-params)
#_=> (System/currentTimeMillis))
#_=> (fn [sym state result]
#_=> (prn sym
#_=> (- (System/currentTimeMillis) state)
#_=> (if (map? result) result (count result))))))
#'dev/lds
dev=> (sql/find-by-keys lds :foo {:name "Person"})
next.jdbc/execute! ["SELECT * FROM foo WHERE name = ?" "Person"]
next.jdbc/execute! 813 1
[#:FOO{:NAME "Person"}]
dev=>
```
A more sophisticated example could use `sym` to decide whether to just log the SQL and
some parameter values or return the current time and the SQL and parameters, so that the
result logging could log the SQL, parameters, _and_ result set information with timing.
## Support from Specs
As you are developing with `next.jdbc`, it can be useful to have assistance from `clojure.spec` in checking calls to `next.jdbc`'s functions, to provide explicit argument checking and/or better error messages for some common mistakes, e.g., trying to pass a plain SQL string where a vector (containing a SQL string, and no parameters) is expected.
You can enable argument checking for functions in `next.jdbc`, `next.jdbc.connection`, `next.jdbc.prepare`, and `next.jdbc.sql` by requiring the `next.jdbc.specs` namespace and instrumenting the functions. A convenience function is provided:
```clojure
(require '[next.jdbc.specs :as specs])
(specs/instrument) ; instruments all next.jdbc API functions
(jdbc/execute! ds "SELECT * FROM fruit")
Call to #'next.jdbc/execute! did not conform to spec.
```
In the `:problems` output, you'll see the `:path [:sql :sql-params]` and `:pred vector?` for the `:val "SELECT * FROM fruit"`. Without the specs' assistance, this mistake would produce a more cryptic error, a `ClassCastException`, that a `Character` cannot be cast to a `String`, from inside `next.jdbc.prepare`.
A convenience function also exists to revert that instrumentation:
```clojure
(specs/unstrument) ; undoes the instrumentation of all next.jdbc API functions
```
next-jdbc-1.3.955/doc/migration-from-clojure-java-jdbc.md 0000664 0000000 0000000 00000041064 14700603111 0023053 0 ustar 00root root 0000000 0000000 # Migrating from `clojure.java.jdbc`
This page attempts to list all of the differences between [clojure.java.jdbc](https://github.com/clojure/java.jdbc) and `next.jdbc`. Some of them are large and obvious, some of them are small and subtle -- all of them are deliberate design choices.
## Conceptually
`clojure.java.jdbc` focuses heavily on a `db-spec` hash map to describe the various ways of interacting with the database and grew from very imperative origins that expose a lot of the JDBC API (multiple types of SQL execution, some operations returned hash maps, others update counts as integers, etc).
`next.jdbc` focuses on using protocols and native Java JDBC types where possible (for performance and simplicity) and strives to present a more modern Clojure API with namespace-qualified keywords in hash maps, reducible SQL operations as part of the primary API, and a streamlined set of SQL execution primitives. Execution always returns a hash map (for one result) or a vector of hash maps (for multiple results) -- even update counts are returned as if they were result sets.
### Rows and Result Sets
`clojure.java.jdbc` returned result sets (and generated keys) as hash maps with simple, lower-case keys by default. `next.jdbc` returns result sets (and generated keys) as hash maps with qualified, as-is keys by default: each key is qualified by the name of table from which it is drawn, if known. The as-is default is chosen to a) improve performance and b) not mess with the data. Using a `:builder-fn` option of `next.jdbc.result-set/as-unqualified-maps` will produce simple, as-is keys. Using a `:builder-fn` option of `next.jdbc.result-set/as-unqualified-lower-maps` will produce simple, lower-case keys -- the most compatible with `clojure.java.jdbc`'s default behavior.
> Note: `clojure.java.jdbc` would make column names unique by appending numeric suffixes, for example in a `JOIN` that produced columns `id` from multiple tables. `next.jdbc` does not do this: if you use _qualified_ column names -- the default -- then you would get `:sometable/id` and `:othertable/id`, but with _unqualified_ column names you would just get one `:id` in each row. It was always poor practice to rely on `clojure.java.jdbc`'s renaming behavior and it added quite an overhead to result set building, which is why `next.jdbc` does not support it -- use explicit column aliasing in your SQL instead if you want _unqualified_ column names!
If you used `:as-arrays? true`, you will most likely want to use a `:builder-fn` option of `next.jdbc.result-set/as-unqualified-lower-arrays`.
> Note: When `next.jdbc` cannot obtain a `ResultSet` object and returns `{:next.jdbc/count N}` instead, these builder functions are not applied -- the `:builder-fn` option is not used in that situation.
### Transactions
Although both libraries support transactions -- via `clojure.java.jdbc/with-db-transaction` and
via `next.jdbc/with-transaction` -- there are some important considerations when you are migrating:
* `clojure.java.jdbc/with-db-transaction` allows nested calls to be present but it tracks the "depth" of such calls and "nested" calls are simply ignored (because transactions do not actually nest in JDBC).
* `next.jdbc/with-transaction` will attempt to set up a transaction on an existing `Connection` if that is what it is passed (otherwise a new `Connection` is created and a new transaction set up on that). That means that if you have nested calls, the inner transaction will commit (or rollback) all the way to the outermost transaction. `next.jdbc` "trusts" the programmer to know what they are doing. You can bind `next.jdbc.transaction/*nested-tx*` to `:ignore` if you want the same behavior as `clojure.java.jdbc` where all nested calls are ignored and the outermost transaction is in full control. _Note that this is a per-thread "global" setting and not related to just a single connection, so you can't use this setting if you are working with multiple databases in the same dynamic thread context (`binding`)._
* Every operation in `clojure.java.jdbc` attempts to create its own transaction, which is a no-op inside a `with-db-transaction` so it is safe; transactions are _implicit_ in `clojure.java.jdbc`. However, if you have migrated that `with-db-transaction` call over to `next.jdbc/with-transaction` then any `clojure.java.jdbc` operations invoked inside the body of that migrated transaction _will still try to create their own transactions_ and `with-db-transaction` won't know about the outer `with-transaction` call. That means you will effectively get the "overlapping" behavior of `next.jdbc` since the `clojure.java.jdbc` operation will cause the outermost transaction to be committed or rolled back.
* None of the operations in `next.jdbc` try to create transactions -- except `with-transaction`. All `Connection`s are auto-commit by default so it doesn't need the local transactions that `clojure.java.jdbc` tries to create; transactions are _explicit_ in `next.jdbc`.
There are some strategies you can take to mitigate these differences:
1. Migrate code bottom-up so that you don't end up with calls to `clojure.java.jdbc` operations inside `next.jdbc/with-transaction` calls.
2. When you migrate a `with-db-transaction` call, think carefully about whether it could be a nested call (in which case simply remove it) or a conditionally nested call which you'll need to be much more careful about migrating.
3. You can bind `next.jdbc.transaction/*nested-tx*` to `:prohibit` which will throw exceptions if you accidentally nest calls to `next.jdbc/with-transaction`. Although you can bind it to `:ignore` in order to mimic the behavior of `clojure.java.jdbc`, that should be considered a last resort for dealing with complex conditional nesting of transaction calls. _Note that this is a per-thread "global" setting and not related to just a single connection, so you can't use this setting if you are working with multiple databases in the same dynamic thread context (`binding`)._
### Option Handling
Because `clojure.java.jdbc` focuses on a hash map for the `db-spec` that is passed around, it can hold options that act as defaults for all operations on it. In addition, all operations in `clojure.java.jdbc` can accept a hash map of options and can pass those options down the call chain. In `next.jdbc`, `get-datasource`, `get-connection`, and `prepare` all produce Java objects that cannot have any extra options attached. On one hand, that means that it is harder to provide "default options", and on the other hand it means you need to be a bit more careful to ensure that you pass the appropriate options to the appropriate function, since they cannot be passed through the call chain via the `db-spec`. That's where `next.jdbc/with-options` can come in handy to wrap a connectable (generally a datasource or a connection) but be careful where you are managing connections and/or transactions directly, as mentioned in the [Getting Started](/doc/getting-started.md) guide.
In [All The Options](all-the-options.md), the appropriate options are shown for each function, as well as which options _will_ get passed down the call chain, e.g., if a function can open a connection, it will accept options for `get-connection`; if a function can build a result set, it will accept `:builder-fn`. However, `get-datasource`, `get-connection`, and `prepare` cannot propagate options any further because they produce Java objects as their results -- in particular, `prepare` can't accept `:builder-fn` because it doesn't build result sets: only `plan`, `execute-one!`, and `execute!` can use `:builder-fn`.
In particular, this means that you can't globally override the default options (as you could with `clojure.java.jdbc` by adding your preferred defaults to the db-spec itself). If the default options do not suit your usage and you really don't want to override them in every call, it is recommended that you try to use `next.jdbc/with-options` first, and if that still doesn't satisfy you, write a wrapper namespace that implements the subset of the dozen API functions (from `next.jdbc` and `next.jdbc.sql`) that you want to use, overriding their `opts` argument with your defaults.
## Primary API
`next.jdbc` has a deliberately narrow primary API that has (almost) no direct overlap with `clojure.java.jdbc`:
* `get-datasource` -- has no equivalent in `clojure.java.jdbc` but is intended to emphasize `javax.sql.DataSource` as a starting point,
* `get-connection` -- overlaps with `clojure.java.jdbc` (and returns a `java.sql.Connection`) but accepts only a subset of the options (`:dbtype`/`:dbname` hash map, `String` JDBC URL); `clojure.java.jdbc/get-connection` accepts `{:datasource ds}` whereas `next.jdbc/get-connection` accepts the `javax.sql.DataSource` object directly,
* `prepare` -- somewhat similar to `clojure.java.jdbc/prepare-statement` but it accepts a vector of SQL and parameters (compared to just a raw SQL string),
* `plan` -- somewhat similar to `clojure.java.jdbc/reducible-query` but accepts arbitrary SQL statements for execution,
* `execute!` -- has no direct equivalent in `clojure.java.jdbc` (but it can replace most uses of both `query` and `db-do-commands`),
* `execute-one!` -- has no equivalent in `clojure.java.jdbc` (but it can replace most uses of `query` that currently use `:result-set-fn first`),
* `transact` -- similar to `clojure.java.jdbc/db-transaction*`,
* `with-transaction` -- similar to `clojure.java.jdbc/with-db-transaction`,
* `with-options` -- provides a way to specify "default options" over a group of operations, by wrapping the connectable (datasource or connection).
If you were using a bare `db-spec` hash map with `:dbtype`/`:dbname`, or a JDBC URL string everywhere, that should mostly work with `next.jdbc` since most functions accept a "connectable", but it would be better to create a datasource first, and then pass that around. Note that `clojure.java.jdbc` allowed the `jdbc:` prefix in a JDBC URL to be omitted but `next.jdbc` _requires that prefix!_
If you were already creating `db-spec` as a pooled connection datasource -- a `{:datasource ds}` hashmap -- then passing `(:datasource db-spec)` to the `next.jdbc` functions is the simplest migration path. If you are migrating piecemeal and want to support _both_ `clojure.java.jdbc` _and_ `next.jdbc` at the same time in your code, you should consider using a datasource as the common way to work with both libraries. You can use `next.jdbc`'s `get-datasource` or the `->pool` function (in `next.jdbc.connection`) to create a `javax.sql.DataSource` and then build a `db-spec` hash map with it (`{:datasource ds}`) and pass that around your program. `clojure.java.jdbc` calls can use that as-is, `next.jdbc` calls can use `(:datasource db-spec)`, so you don't have to adjust any of your call chains (assuming you're passing `db-spec` around) and you can migrate one function at a time.
If you were using other forms of the `db-spec` hash map, you'll need to adjust to one of the three modes above, since those are the only ones supported in `next.jdbc`.
The `next.jdbc.sql` namespace contains several functions with similarities to `clojure.java.jdbc`'s core API:
* `insert!` -- similar to `clojure.java.jdbc/insert!` but only supports inserting a single map,
* `insert-multi!` -- similar to `clojure.java.jdbc/insert-multi!` but only supports inserting columns and a vector of row values, or a sequence of hash maps _that all have the same keys_ -- unlike `clojure.java.jdbc/insert-multi!`, you should always get a single multi-row insertion,
* `query` -- similar to `clojure.java.jdbc/query`,
* `find-by-keys` -- similar to `clojure.java.jdbc/find-by-keys` but will also accept a partial where clause (vector) instead of a hash map of column name/value pairs,
* `get-by-id` -- similar to `clojure.java.jdbc/get-by-id`,
* `update!` -- similar to `clojure.java.jdbc/update!` but will also accept a hash map of column name/value pairs instead of a partial where clause (vector),
* `delete!` -- similar to `clojure.java.jdbc/delete!` but will also accept a hash map of column name/value pairs instead of a partial where clause (vector).
If you were using `db-do-commands` in `clojure.java.jdbc` to execute DDL, the following is the equivalent in `next.jdbc`:
```clojure
(defn do-commands [connectable commands]
(if (instance? java.sql.Connection connectable)
(with-open [stmt (next.jdbc.prepare/statement connectable)]
(run! #(.addBatch stmt %) commands)
(into [] (.executeBatch stmt)))
(with-open [conn (next.jdbc/get-connection connectable)]
(do-commands conn commands))))
```
### `:identifiers` and `:qualifier`
If you are using `:identifiers`, you will need to change to the appropriate `:builder-fn` option with one of `next.jdbc.result-set`'s `as-*` functions.
`clojure.java.jdbc`'s default is the equivalent of `as-unqualified-lower-maps` (with the caveat that conflicting column names are not made unique -- see the note above in **Rows and Result Sets**). If you specified `:identifiers identity`, you can use `as-unqualified-maps`. If you provided your own string transformation function, you probably want `as-unqualified-modified-maps` and also pass your transformation function as the `:label-fn` option.
If you used `:qualifier`, you can get the same effect with `as-modified-maps` by passing `:qualifier-fn (constantly "my_qualifier")` (and the appropriate `:label-fn` -- either `identity` or `clojure.string/lower-case`).
### `:entities`
If you are using `:entities`, you will need to change to the appropriate `:table-fn`/`:column-fn` options. Table naming and column naming can be controlled separately in `next.jdbc`. Instead of the `quoted` function, there is the `next.jdbc.quoted` namespace which contains functions for the common quoting strategies.
### `:result-set-fn` and `:row-fn`
If you are using `:result-set-fn` and/or `:row-fn`, you will need to change to explicit calls (to the result set function, or to `map` the row function), or to use the `plan` approach with `reduce` or various transducing functions.
> Note: this means that result sets are never exposed lazily in `next.jdbc` -- in `clojure.java.jdbc` you had to be careful that your `:result-set-fn` was eager, but in `next.jdbc` you either reduce the result set eagerly (via `plan`) or you get a fully-realized result set data structure back (from `execute!` and `execute-one!`). As with `clojure.java.jdbc` however, you can still stream result sets from the database and process them via reduction (was `reducible-query`, now `plan`). Remember that you can terminate a reduction early by using the `reduced` function to wrap the final value you produce.
### Processing Database Metadata
There are no metadata-specific functions in `next.jdbc` but those in `clojure.java.jdbc` are only a very thin layer over the raw Java calls. Here's how metadata can be handled in `next.jdbc`:
```clojure
(with-open [con (p/get-connection ds opts)]
(-> (.getMetaData con) ; produces java.sql.DatabaseMetaData
(.getTables nil nil nil (into-array ["TABLE" "VIEW"]))
(rs/datafiable-result-set ds opts)))
```
Several methods on `DatabaseMetaData` return a `ResultSet` object. All of those can be handled similarly.
## Further Minor differences
These are mostly drawn from [Issue #5](https://github.com/seancorfield/next-jdbc/issues/5) although most of the bullets in that issue are described in more detail above.
* Keyword options no longer end in `?` -- for consistency (in `clojure.java.jdbc`, some flag options ended in `?` and some did not; also some options that ended in `?` accepted non-`Boolean` values),
* `with-db-connection` has been replaced by just `with-open` containing a call to `get-connection`,
* `with-transaction` can take a `:rollback-only` option, but there is no built-in way to change a transaction to rollback _dynamically_; either throw an exception (all transactions roll back on an exception) or call `.rollback` directly on the `java.sql.Connection` object (see [Manual Rollback Inside a Transactions](/doc/transactions.md#manual-rollback-inside-a-transaction) and the following section about save points),
* `clojure.java.jdbc` implicitly allowed transactions to nest and just silently ignored the inner, nested transactions (so you only really had the top-level, outermost transaction); `next.jdbc` by default assumes you know what you are doing and so an inner (nested) transaction will commit or rollback the work done so far in outer transaction (and then when that outer transaction ends, the remaining work is rolled back or committed); `next.jdbc.transaction/*nested-tx*` is a dynamic var that can be bound to `:ignore` to get similar behavior to `clojure.java.jdbc`.
* The extension points for setting parameters and reading columns are now `SettableParameter` and `ReadableColumn` protocols.
next-jdbc-1.3.955/doc/prepared-statements.md 0000664 0000000 0000000 00000025045 14700603111 0020631 0 ustar 00root root 0000000 0000000 # Prepared Statements
Under the hood, whenever you ask `next.jdbc` to execute some SQL (via `plan`, `execute!`, `execute-one!` or the "friendly" SQL functions) it calls `prepare` to create a `java.sql.PreparedStatement`, adds in the parameters you provide, and then calls `.execute` on it. Then it attempts to get a `ResultSet` from that and either return it or process it. If you asked for generated keys to be returned, that `ResultSet` will contain those generated keys if your database supports it, otherwise it will be whatever the `.execute` function produces. If no `ResultSet` is available at all, `next.jdbc` will ask for the count of updated rows and return that as if it were a result set.
> Note: Some databases do not support all SQL operations via `PreparedStatement`, in which case you may need to create a `java.sql.Statement` instead, via `next.jdbc.prepare/statement`, and pass that into `plan`, `execute!`, or `execute-one!`, along with the SQL you wish to execute. Note that such statement execution may not have parameters. See the [Prepared Statement Caveat in Getting Started](/doc/getting-started.md#prepared-statement-caveat) for an example.
If you have a SQL operation that you intend to run multiple times on the same `java.sql.Connection`, it may be worth creating the prepared statement yourself and reusing it. `next.jdbc/prepare` accepts a connection and a vector of SQL and optional parameters and returns a `java.sql.PreparedStatement` which can be passed to `plan`, `execute!`, or `execute-one!` as the first argument. It is your responsibility to close the prepared statement after it has been used.
If you need to pass an option map to `plan`, `execute!`, or `execute-one!` when passing a statement or prepared statement, you must pass `nil` or `[]` as the second argument:
```clojure
(with-open [con (jdbc/get-connection ds)]
(with-open [ps (jdbc/prepare con ["..." ...])]
(jdbc/execute-one! ps nil {...})))
(with-open [stmt (jdbc/statement con)]
(jdbc/execute-one! stmt nil {...})))
```
You can provide the parameters in the `prepare` call or you can provide them via a call to `set-parameters` (discussed in more detail below).
```clojure
;; assuming require next.jdbc.prepare :as p
(with-open [con (jdbc/get-connection ds)
ps (jdbc/prepare con ["..."])]
(jdbc/execute-one! (p/set-parameters ps [...])))
```
## Prepared Statement Parameters
If parameters are provided in the vector along with the SQL statement, in the call to `prepare`, then `set-parameter` is behind the scenes called for each of them. This is part of the `SettableParameter` protocol:
* `(set-parameter v ps i)` -- by default this calls `(.setObject ps i v)` (for `nil` and `Object`)
This can be extended to any Clojure data type, to provide a customized way to add specific types of values as parameters to any `PreparedStatement`. For example, to have all `java.time.Instant`, `java.time.LocalDate` and `java.time.LocalDateTime` objects converted to `java.sql.Timestamp` automatically:
```clojure
(extend-protocol p/SettableParameter
java.time.Instant
(set-parameter [^java.time.Instant v ^PreparedStatement ps ^long i]
(.setTimestamp ps i (java.sql.Timestamp/from v)))
java.time.LocalDate
(set-parameter [^java.time.LocalDate v ^PreparedStatement ps ^long i]
(.setTimestamp ps i (java.sql.Timestamp/valueOf (.atStartOfDay v))))
java.time.LocalDateTime
(set-parameter [^java.time.LocalDateTime v ^PreparedStatement ps ^long i]
(.setTimestamp ps i (java.sql.Timestamp/valueOf v))))
```
> Note: those conversions can also be enabled by requiring the [`next.jdbc.date-time` namespace](https://cljdoc.org/d/com.github.seancorfield/next.jdbc/CURRENT/api/next.jdbc.date-time).
You can also extend this protocol via metadata so you can do it on a per-object basis if you need:
```clojure
(with-meta obj {'next.jdbc.prepare/set-parameter (fn [v ps i]...)})
```
The `next.jdbc.types` namespace provides functions to wrap values with per-object implementations of `set-parameter` for every standard `java.sql.Types` value. Each is named `as-xxx` corresponding to `java.sql.Types/XXX`.
The converse, converting database-specific types to Clojure values is handled by the `ReadableColumn` protocol, discussed in the previous section ([Result Set Builders](/doc/result-set-builders.md#readablecolumn)).
As noted above, `next.jdbc.prepare/set-parameters` is available for you to call on any existing `PreparedStatement` to set or update the parameters that will be used when the statement is executed:
* `(set-parameters ps params)` -- loops over a sequence of parameter values and calls `set-parameter` for each one, as above.
If you need more specialized parameter handling than the protocol can provide, then you can create prepared statements explicitly, instead of letting `next.jdbc` do it for you, and then calling your own variant of `set-parameters` to install those parameters.
## Batched Parameters
By default, `next.jdbc` assumes that you are providing a single set of parameter values and then executing the prepared statement. If you want to run a single prepared statement with multiple groups of parameters, you might want to take advantage of the increased performance that may come from using JDBC's batching machinery.
You could do this manually:
```clojure
;; assuming require next.jdbc.prepare :as p
(with-open [con (jdbc/get-connection ds)
ps (jdbc/prepare con ["insert into status (id,name) values (?,?)"])]
(p/set-parameters ps [1 "Approved"])
(.addBatch ps)
(p/set-parameters ps [2 "Rejected"])
(.addBatch ps)
(p/set-parameters ps [3 "New"])
(.addBatch ps)
(.executeBatch ps)) ; returns int[]
```
Here we set parameters and add them in batches to the prepared statement, then we execute the prepared statement in batch mode. You could also do the above like this, assuming you have those groups of parameters in a sequence:
```clojure
(with-open [con (jdbc/get-connection ds)
ps (jdbc/prepare con ["insert into status (id,name) values (?,?)"])]
(run! #(.addBatch (p/set-parameters ps %))
[[1 "Approved"] [2 "Rejected"] [3 "New"]])
(.executeBatch ps)) ; returns int[]
```
Both of those are somewhat ugly and contain a fair bit of boilerplate and Java interop, so a helper function is provided in `next.jdbc` to automate the execution of batched parameters:
```clojure
(with-open [con (jdbc/get-connection ds)
ps (jdbc/prepare con ["insert into status (id,name) values (?,?)"])]
(jdbc/execute-batch! ps [[1 "Approved"] [2 "Rejected"] [3 "New"]]))
;; or:
(jdbc/execute-batch! ds
"insert into status (id,name) values (?,?)"
[[1 "Approved"] [2 "Rejected"] [3 "New"]]
;; options hash map required here to disambiguate
;; this call from the 2- & 3-arity calls
{})
```
By default, this adds all the parameter groups and executes one batched command. It returns a (Clojure) vector of update counts (rather than `int[]`). If you provide an options hash map, you can specify a `:batch-size` and the parameter groups will be partitioned and executed as multiple batched commands. This is intended to allow very large sequences of parameter groups to be executed without running into limitations that may apply to a single batched command. If you expect the update counts to be very large (more than `Integer/MAX_VALUE`), you can specify `:large true` so that `.executeLargeBatch` is called instead of `.executeBatch`.
> Note: not all databases support `.executeLargeBatch`.
If you want to get the generated keys from an `insert` done via `execute-batch!`, you need a couple of extras, compared to the above:
```clojure
(with-open [con (jdbc/get-connection ds)
;; ensure the PreparedStatement will return the keys:
ps (jdbc/prepare con ["insert into status (id,name) values (?,?)"]
{:return-keys true})]
;; this will call .getGeneratedKeys for each batch and return them as a
;; vector of datafiable result sets (the keys in map are database-specific):
(jdbc/execute-batch! ps [[1 "Approved"] [2 "Rejected"] [3 "New"]]
{:return-generated-keys true}))
;; or:
(jdbc/execute-batch! ds
"insert into status (id,name) values (?,?)"
[[1 "Approved"] [2 "Rejected"] [3 "New"]]
{:return-keys true ; for creation of PreparedStatement
:return-generated-keys true}) ; for batch result format
```
This calls `rs/datafiable-result-set` behind the scenes so you can also pass a `:builder-fn` option to `execute-batch!` if you want something other than qualified as-is hash maps.
> Note: not all databases support calling `.getGeneratedKeys` here (everything I test against seems to, except MS SQL Server and SQLite). Some databases will only return one generated key per batch, rather than a generated key for every row inserted. You may need to add `RETURNING *` to your `INSERT` statements instead.
### Caveats
There are several caveats around using batched parameters. Some JDBC drivers need a "hint" in order to perform the batch operation as a single command for the database. In particular, PostgreSQL requires the `:reWriteBatchedInserts true` option and MySQL requires `:rewriteBatchedStatements true` (both non-standard JDBC options, of course!). These should be provided as part of the db-spec hash map when the datasource is created.
In addition, if the batch operation fails for a group of parameters, it is database-specific whether the remaining groups of parameters are used, i.e., whether the operation is performed for any further groups of parameters after the one that failed. The result of calling `execute-batch!` is a vector of integers. Each element of the vector is the number of rows affected by the operation for each group of parameters. `execute-batch!` may throw a `BatchUpdateException` and calling `.getUpdateCounts` (or `.getLargeUpdateCounts`) on the exception may return an array containing a mix of update counts and error values (a Java `int[]` or `long[]`). Some databases don't always return an update count but instead a value indicating the number of rows is not known (but sometimes you can still get the update counts).
Finally, some database drivers don't do batched operations at all -- they accept `.executeBatch` but they run the operation as separate commands for the database rather than a single batched command. Some database drivers do not support `.getGeneratedKeys` (e.g., MS SQL Server and SQLite) so you cannot use `:return-generated-keys` and you need to use `RETURNING *` in your `INSERT` statements instead.
next-jdbc-1.3.955/doc/result-set-builders.md 0000664 0000000 0000000 00000037317 14700603111 0020565 0 ustar 00root root 0000000 0000000 # RowBuilder and ResultSetBuilder
In [Getting Started](/doc/getting-started.md), it was noted that, by default, `execute!` and `execute-one!` return result sets as (vectors of) hash maps with namespace-qualified keys as-is. If your database naturally produces uppercase column names from the JDBC driver, that's what you'll get. If it produces mixed-case names, that's what you'll get.
> Note: Some databases do not return the table name in the metadata by default. If you run into this, you might try adding `:ResultSetMetaDataOptions "1"` to your db-spec (so it is passed as a property to the JDBC driver when you create connections). If your database supports that, it will perform additional work to try to add table names to the result set metadata. It has been reported that Oracle just plain old does not support table names at all in its JDBC drivers.
The default builder for rows and result sets creates qualified keywords that match whatever case the JDBC driver produces. That builder is `next.jdbc.result-set/as-maps` but there are several options available:
* `as-maps` -- table-qualified keywords as-is, the default, e.g., `:ADDRESS/ID`, `:myTable/firstName`,
* `as-unqualified-maps` -- simple keywords as-is, e.g., `:ID`, `:firstName`,
* `as-lower-maps` -- table-qualified lower-case keywords, e.g., `:address/id`, `:mytable/firstname`,
* `as-unqualified-lower-maps` -- simple lower-case keywords, e.g., `:id`, `:firstname`,
* `as-arrays` -- table-qualified keywords as-is (vector of column names, followed by vectors of row values),
* `as-unqualified-arrays` -- simple keywords as-is,
* `as-lower-arrays` -- table-qualified lower-case keywords,
* `as-unqualified-lower-arrays` -- simple lower-case keywords.
The reason behind the default is to a) be a simple transform, b) produce qualified keys in keeping with Clojure's direction (with `clojure.spec` etc), and c) not mess with the data. `as-arrays` is (slightly) faster than `as-maps` since it produces less data (vectors of values instead of vectors of hash maps), but the `lower` options will be slightly slower since they include (conditional) logic to convert strings to lower-case. The `unqualified` options may be slightly faster than their qualified equivalents but **make no attempt to keep column names unique if your SQL joins across multiple tables**.
> Note: This is a deliberate difference from `clojure.java.jdbc` which would make column names unique by appending numeric suffixes. It was always poor practice to rely on `clojure.java.jdbc`'s renaming behavior and it added quite an overhead to result set building, which is why `next.jdbc` does not support it -- use explicit column aliasing in your SQL instead if you want _unqualified_ column names!
In addition, the following generic builders can take `:label-fn` and `:qualifier-fn` options to control how the label and qualifier are processed. The `lower` variants above are implemented in terms of these, passing a `lower-case` function for both of those options.
* `as-modified-maps` -- table-qualified keywords,
* `as-unqualified-modified-maps` -- simple keywords,
* `as-modified-arrays` -- table-qualified keywords,
* `as-unqualified-modified-arrays` -- simple keywords.
An example builder that naively converts `snake_case` database table/column names to `kebab-case` keywords:
```clojure
(defn as-kebab-maps [rs opts]
(let [kebab #(str/replace % #"_" "-")]
(result-set/as-modified-maps rs (assoc opts :qualifier-fn kebab :label-fn kebab))))
```
However, a version of `as-kebab-maps` is built-in, as is `as-unqualified-kebab-maps`, which both use the `->kebab-case` function from the [camel-snake-kebab library](https://github.com/clj-commons/camel-snake-kebab/) with `as-modified-maps` and `as-unqualified-modified-maps` respectively, so you can just use the built-in `result-set/as-kebab-maps` (or `result-set/as-unqualified-kebab-maps`) builder as a `:builder-fn` option instead of writing your own.
> Note: `next.jdbc/snake-kebab-opts` and `next.jdbc/unqualified-snake-kebab-opts` exist, providing pre-built options hash maps that contain these `:builder-fn` options, as well as appropriate `:table-fn` and `:column-fn` options for the **Friendly SQL Functions** so those are often the most convenient way to enable snake/kebab case conversions with `next.jdbc`.
And finally there are two styles of adapters for the existing builders that let you override the default way that columns are read from result sets.
The first style takes a `column-reader` function, which is called with the `ResultSet`, the `ResultSetMetaData`, and the column index, and is expected to read the raw column value from the result set and return it. The result is then passed through `read-column-by-index` (from `ReadableColumn`, which may be implemented directly via protocol extension or via metadata on the result of the `column-reader` function):
* `as-maps-adapter` -- adapts an existing map builder function with a new column reader,
* `as-arrays-adapter` -- adapts an existing array builder function with a new column reader.
The default `column-reader` function behavior would be:
```clojure
(defn default-column-reader
[^ResultSet rs ^ResultSetMetaData rsmeta ^Integer i]
(.getObject rs i))
```
An example column reader is provided -- `clob-column-reader` -- that still uses `.getObject` but will expand `java.sql.Clob` values into strings (using the `clob->string` helper function):
```clojure
{:builder-fn (result-set/as-maps-adapter
result-set/as-maps
result-set/clob-column-reader)}
```
As of 1.1.569, the second style of adapter relies on `with-column-value` from `RowBuilder` (see below) and allows you to take complete control of the column reading process. This style takes a `column-by-index-fn` function, which is called with the builder itself, the `ResultSet`, and the column index, and is expected to read the raw column value from the result set and perform any and all processing on it, before returning it. The result is added directly to the current row with no further processing.
* `builder-adapter` -- adapts any existing builder function with a new column reading function.
The default `column-by-index-fn` function behavior would be:
```clojure
(defn default-column-by-index-fn
[builder ^ResultSet rs ^Integer i]
(result-set/read-column-by-index (.getObject rs i) (:rsmeta builder) i))
```
Because the builder itself is passed in, the vector of processed column names is available as `(:cols builder)` (in addition to the `ResultSetMetaData` as `(:rsmeta builder)`). This allows you to take different actions based on the metadata or the column name, as well as bypassing the `read-column-by-index` call if you wish.
The older `as-*-adapter` functions are now implemented in terms of this `builder-adapter` because `with-column-value` abstracts away _how_ the new column's value is added to the row being built.
## RowBuilder Protocol
This protocol defines five functions and is used whenever `next.jdbc` needs to materialize a row from a `ResultSet` as a Clojure data structure:
* `(->row builder)` -- produces a new row (a `(transient {})` by default),
* `(column-count builder)` -- returns the number of columns in each row,
* `(with-column builder row i)` -- given the row so far, fetches column `i` from the current row of the `ResultSet`, converts it to a Clojure value, and adds it to the row (for `as-maps` this is a call to `.getObject`, a call to `read-column-by-index` -- see the `ReadableColumn` protocol below, and a call to `assoc!`),
* `(with-column-value builder row col v)` -- given the row so far, the column name, and the column value, add the column name/value to the row in the appropriate way: this is a low-level utility, intended to be used in builders (or adapters) that want to control more of the value handling process -- in general, `with-column` will be implemented by calling `with-column-value`,
* `(row! builder row)` -- completes the row (a `(persistent! row)` call by default).
`execute!` and `execute-one!` call these functions for each row they need to build. `plan` _may_ call these functions if the reducing function causes a row to be materialized.
## ResultSet Protocol
This protocol defines three functions and is used whenever `next.jdbc` needs to materialize a result set (multiple rows) from a `ResultSet` as a Clojure data structure:
* `(->rs builder)` -- produces a new result set (a `(transient [])` by default),
* `(with-row builder rs row)` -- given the result set so far and a new row, returns the updated result set (a `(conj! rs row)` call by default),
* `(rs! builder rs)` -- completes the result set (a `(persistent! rs)` call by default).
Only `execute!` expects this protocol to be implemented. `execute-one!` and `plan` do not call these functions.
## Result Set Builder Functions
The `as-*` functions described above are all implemented in terms of these protocols. They are passed the `ResultSet` object and the options hash map (as passed into various `next.jdbc` functions). They return an implementation of the protocols that is then used to build rows and the result set. Note that the `ResultSet` passed in is _mutable_ and is advanced from row to row by the SQL execution function, so each time `->row` is called, the underlying `ResultSet` object points at each new row in turn. By contrast, `->rs` (which is only called by `execute!`) is invoked _before_ the `ResultSet` is advanced to the first row.
The result set builder implementation is also assumed to implement `clojure.lang.ILookup` such that the keys `:cols` and `:rsmeta` are supported and should map to the vector of column names that the builder will produce and the `ResultSetMetaData` object (which can be obtained from the `ResultSet`, if necessary). This is intended to allow `plan` and various builder adapters to access certain information that may be needed for processing results. The default builder implementations (for maps and arrays) are both records with fields `rsmeta` and `cols` (in addition to `rs` -- the `ResultSet` itself). The adapters provided in `next.jdbc.result-set` return reified implementations that delegate field lookup to the underlying builder implementation.
The options hash map for any `next.jdbc` function can contain a `:builder-fn` key and the value is used as the row/result set builder function. The tests for `next.jdbc.result-set` include a [record-based builder function](https://github.com/seancorfield/next-jdbc/blob/develop/test/next/jdbc/result_set_test.clj#L335-L353) as an example of how you can extend this to satisfy your needs.
> Note: When `next.jdbc` cannot obtain a `ResultSet` object and returns `{:next.jdbc/count N}` instead, the builder function is not applied -- the `:builder-fn` option does not affect the shape of the result.
The options hash map passed to the builder function will contain a `:next.jdbc/sql-params` key, whose value is the SQL + parameters vector passed into the top-level `next.jdbc` functions (`plan`, `execute!`, and `execute-one!`).
There is also a convenience function, `datafiable-result-set`, that accepts a `ResultSet` object (and a connectable and an options hash map) and returns a fully realized result set, per the `:builder-fn` option (or `as-maps` if that option is omitted).
The array-based builders warrant special mention:
* When used with `execute!`, the array-based builders will produce a data structure that is a vector of vectors, with the first element being a vector of column names and subsequent elements being vectors of column values in the same corresponding order. The order of column names and values follows the "natural" order from the SQL operation, as determined by the underlying `ResultSet`.
* When used with `execute-one!`, the array-based builders will produce a single vector containing the column values in the "natural" SQL order but you will not get the corresponding column names back.
* When used with `plan`, the array-based builders will cause each abstract row to represent a vector of column values rather than a hash map which limits the operations you can perform on the abstraction to just `Associative` (`get` with a numeric key), `Counted` (`count`), and `Indexed` (`nth`). All other operations will either realize a vector, as if by calling `datafiable-row`, or will fail if the operation does not make sense on a vector (as opposed to a hash map).
## `next.jdbc.optional`
This namespace contains variants of the six `as-maps`-style builders above that omit keys from the row hash maps if the corresponding column is `NULL`. This is in keeping with Clojure's views of "optionality" -- that optional elements should simply be omitted -- and is provided as an "opt-in" style of rows and result sets.
# ReadableColumn
As mentioned above, when `with-column` is called, the expectation is that the row builder will call `.getObject` on the current state of the `ResultSet` object with the column index and will then call `read-column-by-index`, passing the column value, the `ResultSetMetaData`, and the column index. That function is part of the `ReadableColumn` protocol that you can extend to handle conversion of arbitrary database-specific types to Clojure values. It is extensible via metadata so the value you return can have metadata specifying the implementation of `read-column-by-index`.
If you need more control over how values are read from the `ResultSet` object, you can use `next.jdbc.result-set/as-maps-adapter` (or `next.jdbc.result-set/as-arrays-adapter`, or the more low-level but more generic `next.jdbc.result-set/builder-adapter`) which takes an existing builder function and a column reading function and returns a new builder function that calls your column reading function (with the `ResultSet` object, the `ResultSetMetaData` object, and the column index -- or the builder itself, the `ResultSet` object, and the column index in the case of `builder-adapter`) instead of calling `.getObject` directly.
Note that the `as-*` adapters still call `read-column-by-index` on the value your column reading function returns.
In addition, inside `plan`, as each value is looked up by name in the current state of the `ResultSet` object, the `read-column-by-label` function is called, again passing the column value and the column label (the name used in the SQL to identify that column). This function is also part of the `ReadableColumn` protocol.
The default implementation of this protocol is for these two functions to return `nil` as `nil`, a `Boolean` value as a canonical `true` or `false` value (unfortunately, JDBC drivers cannot be relied on to return unique values here!), and for all other objects to be returned as-is.
`next.jdbc` makes no assumptions beyond `nil` and `Boolean`, but common extensions here could include converting `java.sql.Date` to `java.time.LocalDate` and `java.sql.Timestamp` to `java.time.Instant` for example:
```clojure
(extend-protocol rs/ReadableColumn
java.sql.Date
(read-column-by-label [^java.sql.Date v _]
(.toLocalDate v))
(read-column-by-index [^java.sql.Date v _2 _3]
(.toLocalDate v))
java.sql.Timestamp
(read-column-by-label [^java.sql.Timestamp v _]
(.toInstant v))
(read-column-by-index [^java.sql.Timestamp v _2 _3]
(.toInstant v)))
```
Remember that a protocol extension will apply to all code running in your application so with the above code **all** timestamp values coming from the database will be converted to `java.time.Instant` for all queries. If you want to control behavior across different calls, consider the adapters described above (`as-maps-adapter`, `as-arrays-adapter`, and `builder-adapter`, and think about using metadata to implement the `rs/ReadableColumn` protocol instead of extending it).
Note that the converse, converting Clojure values to database-specific types is handled by the `SettableParameter` protocol, discussed in the next section ([Prepared Statements](/doc/prepared-statements.md#prepared-statement-parameters)).
next-jdbc-1.3.955/doc/tips-and-tricks.md 0000664 0000000 0000000 00000076011 14700603111 0017655 0 ustar 00root root 0000000 0000000 # Tips & Tricks
This page contains various tips and tricks that make it easier to use `next.jdbc` with a variety of databases. It is mostly organized by database, but there are a few that are cross-database and those are listed first.
## CLOB & BLOB SQL Types
Columns declared with the `CLOB` or `BLOB` SQL types are typically rendered into Clojure result sets as database-specific custom types but they should implement `java.sql.Clob` or `java.sql.Blob` (as appropriate). In general, you can only read the data out of those Java objects during the current transaction, which effectively means that you need to do it either inside the reduction (for `plan`) or inside the result set builder (for `execute!` or `execute-one!`). If you always treat these types the same way for all columns across the whole of your application, you could simply extend `next.jdbc.result-set/ReadableColumn` to `java.sql.Clob` (and/or `java.sql.Blob`). Here's an example for reading `CLOB` into a `String`:
```clojure
(extend-protocol rs/ReadableColumn
java.sql.Clob
(read-column-by-label [^java.sql.Clob v _]
(with-open [rdr (.getCharacterStream v)] (slurp rdr)))
(read-column-by-index [^java.sql.Clob v _2 _3]
(with-open [rdr (.getCharacterStream v)] (slurp rdr))))
```
There is a helper in `next.jdbc.result-set` to make this easier -- `clob->string`:
```clojure
(extend-protocol rs/ReadableColumn
java.sql.Clob
(read-column-by-label [^java.sql.Clob v _]
(rs/clob->string v))
(read-column-by-index [^java.sql.Clob v _2 _3]
(rs/clob->string v)))
```
As noted in [Result Set Builders](/doc/result-set-builders.md), there is also `clob-column-reader` that can be used with the `as-*-adapter` result set builder functions.
No helper or column reader is provided for `BLOB` data since it is expected that the semantics of any given binary data will be application specific. For a raw `byte[]` you could probably use:
```clojure
(.getBytes v 1 (.length v)) ; BLOB has 1-based byte index!
```
Consult the [java.sql.Blob documentation](https://docs.oracle.com/javase/8/docs/api/java/sql/Blob.html) for more ways to process it.
> Note: the standard MySQL JDBC driver seems to return `BLOB` data as `byte[]` instead of `java.sql.Blob`.
## Exceptions
A lot of JDBC operations can fail with an exception. JDBC 4.0 has a
[well-defined hierarchy of exception types](https://docs.oracle.com/en/java/javase/17/docs/api/java.sql/java/sql/package-tree.html)
and you can often catch a specific type of exception to do useful handling
of various error conditions that you might "expect" when working with a
database.
A good example is [SQLIntegrityConstraintViolationException](https://docs.oracle.com/en/java/javase/17/docs/api/java.sql/java/sql/SQLIntegrityConstraintViolationException.html)
which typically represents an index/key constraint violation such as a
duplicate primary key insertion attempt.
However, like some other areas when dealing with JDBC, the reality can
be very database-specific. Some database drivers **don't** use the hierarchy
above -- notably PostgreSQL, which has a generic `PSQLException` type
with its own subclasses and semantics. See [PostgreSQL JDBC issue #963](https://github.com/pgjdbc/pgjdbc/issues/963)
for a discussion of the difficulty in adopting the standard JDBC hierarchy
(dating back five years).
The `java.sql.SQLException` class provides `.getErrorCode()` and
`.getSQLState()` methods but the values returned by those are
explicitly vendor-specific (error code) or only partly standardized (state).
In theory, the SQL state should follow either the X/Open (Open Group) or
ANSI SQL 2003 conventions, both of which were behind paywalls(!). The most
complete public listing is probably the IBM DB2
[SQL State](https://www.ibm.com/docs/en/db2woc?topic=messages-sqlstate)
document.
See also this [Stack Overflow post about SQL State](https://stackoverflow.com/questions/1399574/what-are-all-the-possible-values-for-sqlexception-getsqlstate)
for more references and links. Not all database drivers follow either of
these conventions for SQL State so you may still have to consult your
vendor's specific documentation.
All of this makes writing _generic_ error handling, that works across
multiple databases, very hard indeed. You can't rely on the JDBC `SQLException`
hierarchy; you can sometimes rely on a subset of SQL State values.
## Handling Timeouts
JDBC provides a number of ways in which you can decide how long an operation should run before it times out. Some of these timeouts are specified in seconds and some are in milliseconds. Some are handled via connection properties (or JDBC URL parameters), some are handled via methods on various JDBC objects.
Here's how to specify various timeouts using `next.jdbc`:
* `connectTimeout` -- can be specified via the "db-spec" hash map or in a JDBC URL, it is the number of **milliseconds** that JDBC should wait for the initial (socket) connection to complete. Database-specific (may be MySQL only?).
* `loginTimeout` -- can be set via `.setLoginTimeout()` on a `DriverManager` or `DataSource`, it is the number of **seconds** that JDBC should wait for a connection to the database to be made. `next.jdbc` exposes this on the `javax.sql.DataSource` object it reifies from calling `get-datasource` on a "db-spec" hash map or JDBC URL string.
* `queryTimeout` -- can be set via `.setQueryTimeout()` on a `Statement` (or `PreparedStatement`), it is the number of **seconds** that JDBC should wait for a SQL statement to complete. Since this is the most commonly used type of timeout, `next.jdbc` exposes this via the `:timeout` option which can be passed to any function that may construct a `Statement` or `PreparedStatement`.
* `socketTimeout` -- can be specified via the "db-spec" hash map or in a JDBC URL, it is the number of milliseconds that JDBC should wait for socket operations to complete. Database-specific (MS SQL Server and MySQL support this, other databases may too).
Examples:
```clojure
;; connectTimeout / socketTimeout via db-spec:
(def db-spec {:dbtype "mysql" :dbname "example" :user "root" :password "secret"
;; milliseconds:
:connectTimeout 60000 :socketTimeout 30000})
;; socketTimeout via JDBC URL:
(def db-url (str "jdbc:sqlserver://localhost;user=sa;password=secret"
;; milliseconds:
";database=model;socketTimeout=10000"))
;; loginTimeout via DataSource:
(def ds (jdbc/get-datasource db-spec))
(.setLoginTimeout ds 20) ; seconds
;; queryTimeout via options:
(jdbc/execute! ds ["select * from some_table"] {:timeout 5}) ; seconds
;; queryTimeout via method call:
(let [ps (jdbc/prepare ds ["select * from some_table"])]
(.setQueryTimeout ps 10) ; seconds
(jdbc/execute! ps))
```
## Reducing and Folding with `plan`
Most of this documentation describes using `plan` specifically for reducing and notes that you can avoid the overhead of realizing rows from the `ResultSet` into Clojure data structures if your reducing function uses only functions that get column values by name. If you perform any function on the row that would require an actual hash map or a sequence, the row will be realized into a full Clojure hash map via the builder function passed in the options (or via `next.jdbc.result-set/as-maps` by default).
One of the benefits of reducing over `plan` is that you can stream very large result sets, very efficiently, without having the entire result set in memory (assuming your reducing function doesn't build a data structure that is too large!). See the tips below on **Streaming Result Sets**.
If you want to process a `plan` result purely for side-effects, without producing a result,
you can use `run!` instead of `reduce`:
```clojure
(run! process-row (jdbc/plan ds ...))
```
`run!` is based on `reduce` and `process-row` here takes just one argument --
the row -- rather than the usual reducing function that takes two.
The result of `plan` is also foldable in the [clojure.core.reducers](https://clojure.org/reference/reducers) sense. While you could use `execute!` to produce a vector of fully-realized rows as hash maps and then fold that vector (Clojure's vectors support fork-join parallel reduce-combine), that wouldn't be possible for very large result sets. If you fold the result of `plan`, the result set will be partitioned and processed using fork-join parallel reduce-combine. Unlike reducing over `plan`, each row **is** realized into a Clojure data structure and each batch is forked for reduction as soon as that many rows have been realized. By default, `fold`'s batch size is 512 but you can specify a different value in the 4-arity call. Once the entire result set has been read, the last (partial) batch is forked for reduction. The combining operations are forked and interleaved with the reducing operations, so the order (of forked tasks) is batch-1, batch-2, combine-1-2, batch-3, combine-1&2-3, batch-4, combine-1&2&3-4, etc. The amount of parallelization you get will depend on many factors including the number of processors, the speed of your reducing function, the speed of your combining function, and the speed with which result sets can actually be streamed from your database.
There is no back pressure here so if your reducing function is slow, you may end up with more of the realized result set in memory than your system can cope with.
## Times, Dates, and Timezones
Working with dates and timezones in databases can be confusing, as you are
working at the intersection between the database, the JDBC library and the
date library that you happen to be using. A good rule of thumb is to keep
timezone-related logic as simple as possible. For example, with Postgres we
recommend always storing dates in a Postgres `TIMESTAMP` (without time zone)
column, storing all such timestamps in UTC, and applying your time zone logic
separately using application logic. The `TIMESTAMP WITH TIME ZONE` column type in
Postgres stores its date in UTC anyhow, and applications that need to deal with
time zones typically require richer functionality than simply adjusting the time
zone to wherever the database happens to be hosted. Treat time zone related
logic as an application concern, and keep stored dates in UTC.
For example, for a developer using [`clojure.java-time`](https://github.com/dm3/clojure.java-time), saving `(java-time/instant)`
in a timestamp column (and doing any timezone adjustment elsewhere) is a good
way to minimize long term confusion.
> Original text contributed by [Denis McCarthy](https://github.com/denismccarthykerry); in addition: I generally recommend not only using UTC everywhere but also setting your database _and your servers_ to all be in the UTC timezones, to avoid the possibly of incorrect date/time translations -- Sean Corfield.
## MS SQL Server
In MS SQL Server, the generated key from an insert comes back as `:GENERATED_KEYS`.
By default, you won't get table names as qualifiers with Microsoft's JDBC driver (you might with the jTDS driver -- I haven't tried that recently). See this [MSDN forum post about `.getTableName()`](https://social.msdn.microsoft.com/Forums/sqlserver/en-US/55e8cbb2-b11c-446e-93ab-dc30658caf99/resultsetmetadatagettablename-returns-instead-of-table-name) for details. According to one of the answers posted there, if you specify `:result-type` and `:concurrency` in the options for `execute!`, `execute-one!`, `plan`, or `prepare`, that will cause SQL Server to return table names for columns. `:result-type` needs to be `:scroll-sensitive` or `:scroll-insensitive` for this to work. `:concurrency` can be `:read-only` or `:updatable`.
MS SQL Server supports execution of multiple statements when surrounded by `begin`/`end` and can return multiple result sets, when requested via `:multi-rs true` on `execute!`.
```clojure
(jdbc/execute! db-spec ["begin select * from table1; select * from table2; end"] {:multi-rs true})
;; vector of result sets:
=> [[{.. table1 row ..} {.. table1 row ..}]
[{.. table2 row ..} {.. table2 row ..} {..}]]
```
### Batch Statements
Even when using `next.jdbc/execute-batch!`, Microsoft's JDBC driver will still send multiple insert statements to the database unless you specify `:useBulkCopyForBatchInsert true` as part of the db-spec hash map or JDBC URL when the datasource is created.
To use this feature, your Microsoft JDBC driver should be at least version 9.2 and you can use only a limited set of data types. For example, if you use an `inst` to bulk insert a `smalldatetime` value, the driver will revert to the old (slow) behavior. For more details see [Using bulk copy API for batch insert operation](https://docs.microsoft.com/en-us/sql/connect/jdbc/use-bulk-copy-api-batch-insert-operation?view=sql-server-ver16) and [Release notes for JDBC drivers](https://docs.microsoft.com/en-us/sql/connect/jdbc/release-notes-for-the-jdbc-driver?view=sql-server-ver16).
## MySQL & MariaDB
In MySQL, the generated key from an insert comes back as `:GENERATED_KEY`. In MariaDB, the generated key from an insert comes back as `:insert_id`.
MySQL generally stores tables as files so they are case-sensitive if your O/S is (Linux) or case-insensitive if your O/S is not (Mac, Windows) but the column names are generally case-insensitive. This can matter if you use `next.jdbc.result-set/as-lower-maps` because that will lower-case the table names (as well as the column names) so if you are round-tripping based on the keys you get back, you may produce an incorrect table name in terms of case. You'll also need to be careful about `:table-fn`/`:column-fn` because of this.
It's also worth noting that column comparisons are case-insensitive so `WHERE foo = 'BAR'` will match `"bar"` or `"BAR"` etc.
MySQL has a connection option, `:allowMultiQueries true`, that allows you to pass multiple SQL statements in a single operation and can return multiple result sets, when requested via `:multi-rs true`.
```clojure
(def db-spec {:dbtype "mysql" .. :allowMultiQueries true})
;; equivalent to allowMultiQueries=true in the JDBC URL
(jdbc/execute! db-spec ["select * from table1; select * from table2"] {:multi-rs true})
;; vector of result sets:
=> [[{.. table1 row ..} {.. table1 row ..}]
[{.. table2 row ..} {.. table2 row ..} {..}]]
```
Compare this with MS SQL Server above: MySQL does not support `begin`/`end` here. This is not the default behavior because allowing multiple statements in a single operation is generally considered a bit of a risk as it can make it easier for SQL injection attacks to be performed.
### Batch Statements
Even when using `next.jdbc/execute-batch!`, MySQL will still send multiple statements to the database unless you specify `:rewriteBatchedStatements true` as part of the db-spec hash map or JDBC URL when the datasource is created.
### Streaming Result Sets
You should be able to get MySQL to stream very large result sets (when you are reducing over `plan`) by setting the following options:
* `:fetch-size Integer/MIN_VALUE` -- when running `plan` (or when creating a `PreparedStatement`).
> Note: it's possible that other options may be required as well -- I have not verified this yet -- see, for example, the additional options PostgreSQL requires, below.
## Oracle
Ah, dear old Oracle! Over the years of maintaining `clojure.java.jdbc` and now `next.jdbc`, I've had all sorts of bizarre and non-standard behavior reported from Oracle users. The main issue I'm aware of with `next.jdbc` is that Oracle's JDBC drivers all return an empty string from `ResultSetMetaData.getTableName()` so you won't get qualified keywords in the result set hash maps. Sorry!
An important performance issue to be aware of with Oracle's JDBC driver is that the default fetch size is just 10 records. If you are working with large datasets, you will
need to either specify `:prefetch` in your db-spec hash map with a suitable value (say 1,000 or larger), or specify `&prefetch=` in your JDBC URL string. If you want
to keep the default, you can change it on a per-statement basis by specifying `:fetch-size` as an option to `execute!` etc.
If you are using the 10g or later JDBC driver and you try to execute DDL statements that include SQL entities
that start with a `:` (such as `:new` or `:old`), they will be treated as bindable parameter references if
you use a `PreparedStatement` to execute them. Since that's the default for `execute!` etc, it means that you
will likely get an error like the following:
```
Missing IN or OUT parameter at index:: 1
```
You will need to use `next.jdbc.prepare/statement` to create a `Statement` object and then call `execute!`
on that to avoid this error. Don't forget to `.close` the `Statement` after `execute!` -- using `with-open`
is the best way to ensure the statement is properly closed after use.
## PostgreSQL
As you can see in this section (and elsewhere in this documentation), the
PostgreSQL JDBC driver has a number of interesting quirks and behaviors that
you need to be aware of. Although accessing PostgreSQL via JDBC is the most
common approach, there is also a non-JDBC Clojure/Java driver for PostgreSQL called
[PG2](https://github.com/igrishaev/pg2) which supports JSON operations natively
(see below for what's required for JDBC), as well as supporting Java Time natively
(see the section above about **Times, Dates, and Timezones**), and it is also
quite a bit faster than using JDBC.
When you use `:return-keys true` with `execute!` or `execute-one!` (or you use `insert!`), PostgreSQL returns the entire inserted row (unlike nearly every other database that just returns any generated keys!).
The default result set builder for `next.jdbc` is `as-qualified-maps` which
uses the `.getTableName()` method on `ResultSetMetaData` to qualify the
columns in the result set. While some database drivers have this information
on hand from the original SQL operation, PostgreSQL's JDBC driver does not
and it performs an extra SQL query to fetch table names the first time this
method is called for each query. If you want to avoid those extra queries,
and you can live with unqualified column names, you can use `as-unqualified-maps`
as the result set builder instead.
If you have a query where you want to select where a column is `IN` a sequence of values, you can use `col = ANY(?)` with a native array of the values instead of `IN (?,?,?,,,?)` and a sequence of values. **Be aware of
[PostgreSQL bug 17922](https://www.postgresql.org/message-id/flat/17922-1e2e0aeedd294424%40postgresql.org)
which can cause pathological performance when the array has a single element!**
If you think you might have a single-element array, consider using `UNNEST` and
`IN` instead.
What does this mean for your use of `next.jdbc`? In `plan`, `execute!`, and `execute-one!`, you can use `col = ANY(?)` in the SQL string and a single primitive array parameter, such as `(int-array [1 2 3 4])`. That means that in `next.jdbc.sql`'s functions that take a where clause (`find-by-keys`, `update!`, and `delete!`) you can specify `["col = ANY(?)" (int-array data)]` for what would be a `col IN (?,?,?,,,?)` where clause for other databases and require multiple values.
### Batch Statements
Even when using `next.jdbc/execute-batch!`, PostgreSQL will still send multiple statements to the database unless you specify `:reWriteBatchedInserts true` as part of the db-spec hash map or JDBC URL when the datasource is created.
### Streaming Result Sets
You can get PostgreSQL to stream very large result sets (when you are reducing over `plan`) by setting the following options:
* `:auto-commit false` -- when opening the connection
* `:fetch-size 4000, :concurrency :read-only, :cursors :close, :result-type :forward-only` -- when running `plan` (or when creating a `PreparedStatement`).
### Working with Arrays
ResultSet protocol extension to read SQL arrays as Clojure vectors.
```clojure
(import '[java.sql Array])
(require '[next.jdbc.result-set :as rs])
(extend-protocol rs/ReadableColumn
Array
(read-column-by-label [^Array v _] (vec (.getArray v)))
(read-column-by-index [^Array v _ _] (vec (.getArray v))))
```
Insert and read vector example:
```sql
create table example(
tags varchar[]
);
```
```clojure
(execute-one! db-spec
["insert into example(tags) values (?)"
(into-array String ["tag1" "tag2"])])
(execute-one! db-spec
["select * from example limit 1"])
;; => #:example{:tags ["tag1" "tag2"]}
```
> Note: PostgreSQL JDBC driver supports only 7 primitive array types, but not array types like `UUID[]` -
[PostgreSQL™ Extensions to the JDBC API](https://jdbc.postgresql.org/documentation/server-prepare/#arrays).
### Working with Date and Time
By default, PostgreSQL's JDBC driver does not always perform conversions from `java.util.Date` to a SQL data type.
You can enable this by extending `SettableParameter` to the appropriate (Java) types, or by simply requiring [`next.jdbc.date-time`](https://cljdoc.org/d/com.github.seancorfield/next.jdbc/CURRENT/api/next.jdbc.date-time).
In addition, if you want `java.time.Instant`, `java.time.LocalDate`, and `java.time.LocalDateTime` to be automatically converted to SQL data types, requiring `next.jdbc.date-time` will enable those as well (by extending `SettableParameter` for you).
`next.jdbc.date-time` also includes functions that you can call at application startup to extend `ReadableColumn` to either return `java.time.Instant` or `java.time.LocalDate`/`java.time.LocalDateTime` (as well as a function to restore the default behavior of returning `java.sql.Date` and `java.sql.Timestamp`).
### Working with Interval
Postgres has a nonstandard SQL type Interval that is implemented in the Postgres driver as the `org.postgresql.util.PGInterval` type.
In many cases you would want to work with intervals as `java.time.Duration` type by default.
You can support `Duration` instances by extending `SettableParameter` to the `java.time.Duration` type.
Conversely you can support converting PGIntervals back to Durations by extending `ReadableColumn` to the `org.postgresql.util.PGInterval` type.
```clojure
(import '[org.postgresql.util PGInterval])
(import '[java.sql PreparedStatement])
(import '[java.time Duration])
(require '[next.jdbc.result-set :as rs])
(require '[next.jdbc.prepare :as p])
(defn ->pg-interval
  "Takes a Duration instance and converts it into a PGInterval
instance where the interval is created as a number of seconds."
[^java.time.Duration duration]
(doto (PGInterval.)
(.setSeconds (.getSeconds duration))))
(extend-protocol p/SettableParameter
;; Convert durations to PGIntervals before inserting into db
java.time.Duration
(set-parameter [^java.time.Duration v ^PreparedStatement s ^long i]
(.setObject s i (->pg-interval v))))
(defn <-pg-interval
"Takes a PGInterval instance and converts it into a Duration
instance. Ignore sub-second units."
[^org.postgresql.util.PGInterval interval]
(-> Duration/ZERO
(.plusSeconds (.getSeconds interval))
(.plusMinutes (.getMinutes interval))
(.plusHours (.getHours interval))
(.plusDays (.getDays interval))))
(extend-protocol rs/ReadableColumn
;; Convert PGIntervals back to durations
org.postgresql.util.PGInterval
(read-column-by-label [^org.postgresql.util.PGInterval v _]
(<-pg-interval v))
(read-column-by-index [^org.postgresql.util.PGInterval v _2 _3]
(<-pg-interval v)))
```
### Working with Enumerated Types
PostgreSQL has a SQL extension for defining enumerated types and the default `set-parameter` implementation will not work for those. You can use `next.jdbc.types/as-other` to wrap string values in a way that the JDBC driver will convert them to enumerated type values:
```sql
CREATE TYPE language AS ENUM('en','fr','de');
CREATE TABLE person (
...
speaks language NOT NULL,
...
);
```
```clojure
(require '[next.jdbc.sql :as sql]
'[next.jdbc.types :refer [as-other]])
(sql/insert! ds :person {:speaks (as-other "fr")})
```
That call produces a vector `["fr"]` with metadata that implements `set-parameter` such that `.setObject()` is called with `java.sql.Types/OTHER` which allows PostgreSQL to "convert" the string `"fr"` to the corresponding `language` enumerated type value.
### Working with JSON and JSONB
PostgreSQL has good support for [storing, querying and manipulating JSON data](https://www.postgresql.org/docs/current/datatype-json.html). Basic Clojure data structures (lists, vectors, and maps) transform pretty well to JSON data. With a little help `next.jdbc` can automatically convert Clojure data to JSON and back for us.
> Note: some PostgreSQL JSONB operators have a `?` in them which conflicts with the standard parameter placeholder in SQL. You can write the JSONB operators by doubling up the `?`, e.g., `??|` instead of just `?|`. See [PostgreSQL JSONB operators](https://www.postgresql.org/docs/current/functions-json.html#FUNCTIONS-JSONB-OP-TABLE) for more detail.
First we define functions for JSON encoding and decoding. We're using [metosin/jsonista](https://github.com/metosin/jsonista) in these examples but you could use any JSON library, such as [Cheshire](https://github.com/dakrone/cheshire) or [clojure.data.json](https://github.com/clojure/data.json).
```clojure
(require '[jsonista.core :as json])
;; :decode-key-fn here specifies that JSON-keys will become keywords:
(def mapper (json/object-mapper {:decode-key-fn keyword}))
(def ->json json/write-value-as-string)
(def <-json #(json/read-value % mapper))
```
Next we create helper functions to transform Clojure data to and from PostgreSQL Objects
containing JSON:
```clojure
(import '(org.postgresql.util PGobject))
(defn ->pgobject
"Transforms Clojure data to a PGobject that contains the data as
JSON. PGObject type defaults to `jsonb` but can be changed via
metadata key `:pgtype`"
[x]
(let [pgtype (or (:pgtype (meta x)) "jsonb")]
(doto (PGobject.)
(.setType pgtype)
(.setValue (->json x)))))
(defn <-pgobject
"Transform PGobject containing `json` or `jsonb` value to Clojure data."
[^PGobject v]
(let [type (.getType v)
value (.getValue v)]
(if (#{"jsonb" "json"} type)
(some-> value <-json (with-meta {:pgtype type}))
value)))
```
Finally we extend `next.jdbc.prepare/SettableParameter` and `next.jdbc.result-set/ReadableColumn` protocols to make the conversion between clojure data and PGobject JSON automatic:
```clojure
(require '[next.jdbc.prepare :as prepare])
(require '[next.jdbc.result-set :as rs])
(import '[java.sql PreparedStatement])
(set! *warn-on-reflection* true)
;; if a SQL parameter is a Clojure hash map or vector, it'll be transformed
;; to a PGobject for JSON/JSONB:
(extend-protocol prepare/SettableParameter
clojure.lang.IPersistentMap
(set-parameter [m ^PreparedStatement s i]
(.setObject s i (->pgobject m)))
clojure.lang.IPersistentVector
(set-parameter [v ^PreparedStatement s i]
(.setObject s i (->pgobject v))))
;; if a row contains a PGobject then we'll convert them to Clojure data
;; while reading (if column is either "json" or "jsonb" type):
(extend-protocol rs/ReadableColumn
org.postgresql.util.PGobject
(read-column-by-label [^org.postgresql.util.PGobject v _]
(<-pgobject v))
(read-column-by-index [^org.postgresql.util.PGobject v _2 _3]
(<-pgobject v)))
```
#### Inserting and Querying JSON
Let's assume we have following table:
``` sql
create table demo (
id serial primary key,
doc_jsonb jsonb,
doc_json json
)
```
We can now insert Clojure data into json and jsonb fields:
```clojure
(require '[next.jdbc :as jdbc])
(require '[next.jdbc.sql :as sql])
(def db { ...db-spec here... })
(def ds (jdbc/get-datasource db))
(def test-map
{:some-key "some val" :nested {:a 1} :null-val nil :vector [1 2 3]})
(def data1
{:doc_jsonb test-map
:doc_json (with-meta test-map {:pgtype "json"})})
(sql/insert! ds :demo data1)
(def test-vector
[{:a 1} nil 2 "lalala" []])
(def data2
{:doc_jsonb test-vector
:doc_json (with-meta test-vector {:pgtype "json"})})
(sql/insert! ds :demo data2)
```
And those columns are nicely transformed into Clojure data when querying:
```clojure
(sql/get-by-id ds :demo 1)
=> #:demo{:id 1,
:doc_json
{:some-key "some val",
:nested {:a 1},
:vector [1 2 3],
:null-val nil},
:doc_jsonb
{:some-key "some val",
:nested {:a 1},
:vector [1 2 3],
:null-val nil}}
(sql/get-by-id ds :demo 2)
=> #:demo{:id 2,
:doc_json [{:a 1} nil 2 "lalala" []],
:doc_jsonb [{:a 1} nil 2 "lalala" []]}
;; Query by value of JSON field 'some-key'
(sql/query ds [(str "select id, doc_jsonb::json->'nested' as foo"
" from demo where doc_jsonb::json->>'some-key' = ?")
"some val"])
=> [{:demo/id 1, :foo {:a 1}}]
```
#### Using HoneySQL with JSON and JSONB
If you are using HoneySQL to generate your SQL, there will be an inherent conflict
between the data structures you are intending HoneySQL to interpret -- as function calls
and SQL statements -- and the data structures you intend to treat as JSON. See
[General Reference > Working with JSON/JSONB (PostgreSQL)](https://cljdoc.org/d/com.github.seancorfield/honeysql/CURRENT/doc/getting-started/general-reference#working-with-jsonjsonb-postgresql)
in the HoneySQL documentation for more details.
#### JSON or JSONB?
* A `json` column stores JSON data as strings (reading and writing is fast but manipulation is slow, field order is preserved)
* A `jsonb` column stores JSON data in binary format (manipulation is significantly faster but reading and writing is a little slower)
If you're unsure whether you want to use json or jsonb, use jsonb.
## SQLite
SQLite supports both `bool` and `bit` column types but, unlike pretty much every other database out there, it yields `0` or `1` as the column value instead of `false` or `true`. This means that with SQLite alone, you can't just rely on `bool` or `bit` columns being treated as truthy/falsey values in Clojure.
You can work around this using a builder that handles reading the column directly as a `Boolean`:
```clojure
(import '[java.sql ResultSet ResultSetMetaData])
(jdbc/execute! ds ["select * from some_table"]
{:builder-fn (rs/builder-adapter
rs/as-maps
(fn [builder ^ResultSet rs ^Integer i]
(let [rsm ^ResultSetMetaData (:rsmeta builder)]
(rs/read-column-by-index
(if (#{"BIT" "BOOL" "BOOLEAN"} (.getColumnTypeName rsm i))
(.getBoolean rs i)
(.getObject rs i))
rsm
i))))})
```
If you are using `plan`, you'll most likely be accessing columns by just the label (as a keyword) and avoiding the result set building machinery completely. In such cases, you'll still get `bool` and `bit` columns back as `0` or `1` and you'll need to explicitly convert them on a per-column basis since you should know which columns need converting:
```clojure
(reduce (fn [acc row]
(conj acc (-> (select-keys row [:name :is_active])
(update :is_active pos?))))
[]
(jdbc/plan ds ["select * from some_table"]))
```
See also [`datafy`, `nav`, and `:schema` > **SQLite**](/doc/datafy-nav-and-schema.md#sqlite)
for additional caveats on the `next.jdbc.datafy` namespace when using SQLite.
next-jdbc-1.3.955/doc/transactions.md 0000664 0000000 0000000 00000020011 14700603111 0017336 0 ustar 00root root 0000000 0000000 # Transactions
The `transact` function and `with-transaction` macro were briefly mentioned in the [Getting Started](/doc/getting-started.md) section but we'll go into more detail here.
Although `(transact transactable f)` is available, it is expected that you will mostly use `(with-transaction [tx transactable] body...)` when you want to execute multiple SQL operations in the context of a single transaction so that is what this section focuses on.
## Connection-level Control
By default, all connections that `next.jdbc` creates are automatically committable, i.e., as each operation is performed, the effect is committed to the database directly before the next operation is performed. Any exceptions only cause the current operation to be aborted -- any prior operations have already been committed.
It is possible to tell `next.jdbc` to create connections that do not automatically commit operations: pass `{:auto-commit false}` as part of the options map to anything that creates a connection (including `get-connection` itself). You can then decide when to commit or rollback by calling `.commit` or `.rollback` on the connection object itself. You can also create save points (`(.setSavepoint con)`, `(.setSavepoint con name)`) and rollback to them (`(.rollback con save-point)`). You can also change the auto-commit state of an open connection at any time (`(.setAutoCommit con on-off)`).
This is the machinery behind "transactions": one or more operations on a
`Connection` that are not automatically committed, and which can be rolled back
or committed explicitly at any point.
## Automatic Commit & Rollback
`next.jdbc`'s transaction handling provides a convenient baseline for either committing a group of operations if they all succeed or rolling them all back if any of them fails, by throwing an exception. You can either do this on an existing connection -- and `next.jdbc` will try to restore the state of the connection after the transaction completes -- or by providing a datasource and letting `with-transaction` create and manage its own connection:
```clojure
(jdbc/with-transaction [tx my-datasource]
(jdbc/execute! tx ...)
(jdbc/execute! tx ...)) ; will commit, unless exception thrown
(jdbc/with-transaction [tx my-datasource]
(jdbc/execute! tx ...)
(when ... (throw ...)) ; will rollback
(jdbc/execute! tx ...))
```
If `with-transaction` is given a datasource, it will create and close the connection for you. If you pass in an existing connection, `with-transaction` will set up a transaction on that connection and, after either committing or rolling back the transaction, will restore the state of the connection and leave it open.
You can also provide an options map as the third element of the binding vector (or the third argument to the `transact` function). The following options are supported:
* `:isolation` -- the isolation level for this transaction (see [All The Options](/doc/all-the-options.md) for specifics),
* `:read-only` -- set the transaction into read-only mode (if `true`),
* `:rollback-only` -- set the transaction to always rollback, even on success (if `true`).
The latter can be particularly useful in tests, to run a series of SQL operations during a test and then roll them all back at the end.
If you use `next.jdbc/with-transaction` (or `next.jdbc/transact`), then
`next.jdbc` keeps track of whether a "transaction" is in progress or not, and
you can call `next.jdbc/active-tx?` to determine that, in your own code, in
case you want to write code that behaves differently inside or outside a
transaction.
> Note: `active-tx?` only knows about `next.jdbc` transactions -- it cannot track any transactions that you create yourself using the underlying JDBC `Connection`. In addition, this is a per-thread "global" setting and not related to just a single connection, so you can't use this setting if you are working with multiple databases in the same dynamic thread context (`binding`).
## Manual Rollback Inside a Transaction
Instead of throwing an exception (which will propagate through `with-transaction` and therefore provide no result), you can also explicitly rollback if you want to return a result in that case:
```clojure
(jdbc/with-transaction [tx my-datasource]
(let [result (jdbc/execute! tx ...)]
(if ...
(do
(.rollback tx)
result)
(jdbc/execute! tx ...))))
```
## Save Points Inside a Transaction
In general, transactions are per-connection and do not nest in JDBC. If you nest calls to `with-transaction` using a `DataSource` argument (or a db-spec) then you will get separate connections inside each invocation and the transactions will be independent, as permitted by the isolation level.
If you nest such calls passing a `Connection` instead, the inner call will commit (or rollback) all operations on that connection up to that point -- including any performed in the outer call, prior to entering the inner call. The outer call will then commit (or rollback) any additional operations within its scope. This will be confusing at best and most likely buggy behavior! See below for ways to exercise more control over this behavior.
If you want the ability to selectively roll back certain groups of operations inside a transaction, you can use named or unnamed save points:
```clojure
(jdbc/with-transaction [tx my-datasource]
(let [result (jdbc/execute! tx ...) ; op A
sp1 (.setSavepoint tx)] ; unnamed save point
(jdbc/execute! tx ...) ; op B
(when ... (.rollback tx sp1)) ; just rolls back op B
(let [sp2 (.setSavepoint tx "two")] ; named save point
(jdbc/execute! tx ...) ; op C
(when ... (.rollback tx sp2))) ; just rolls back op C
result)) ; returns this and will commit op A
;; (and ops B & C if they weren't rolled back above)
```
### Nesting Transactions
As noted above, transactions do not nest in JDBC and `next.jdbc`'s default behavior is to allow you
to overlap transactions (i.e., nested calls to `with-transaction`) and assume you know what you are
doing, although it would generally be buggy programming to do so.
By contrast, `clojure.java.jdbc` allowed the nested calls but simply _ignored_ the inner calls and
behaved as if you had only the outermost, top-level transaction. That allowed for buggy programming
too, in a different way, but could be convenient if you wanted to override any transaction behavior
in called code, as you might wish to do with a test fixture that set up and rolled back a
transaction at the top-level -- you would just silently lose the effects of any (nested)
transactions in the code under test.
`next.jdbc` provides a way to control the behavior via a public, dynamic Var:
* `next.jdbc.transaction/*nested-tx*` is initially set to `:allow` which allows nested calls but makes them overlap (as described above),
* `(binding [next.jdbc.transaction/*nested-tx* :ignore] ...)` provides the same behavior as `clojure.java.jdbc` where nested calls are essentially ignored and only the outermost transaction takes effect,
* `(binding [next.jdbc.transaction/*nested-tx* :prohibit] ...)` will cause any attempt to start a nested transaction to throw an exception instead; this could be a useful way to detect the potentially buggy behavior described above (for either `:allow` or `:ignore`).
> Note: this is a per-thread "global" setting and not related to just a single connection, so you can't use this setting if you are working with multiple databases in the same dynamic thread context (`binding`).
### `with-options`
If you are using `with-options` to produce wrapped connectables / transactables,
it's important to be aware that `with-transaction` produces a bare Java
`java.sql.Connection` object that cannot have options -- but does allow direct
interop. If you want to use `with-options` with `with-transaction`, you must
either rewrap the `Connection` with a nested call to `with-options` or,
as of 1.3.894, you can use `with-transaction+options` which will automatically
rewrap the `Connection` in a new connectable along with the options from the
original transactable. Be aware that you cannot use Java interop on this
wrapped connectable.
next-jdbc-1.3.955/docker-compose.yml 0000664 0000000 0000000 00000000601 14700603111 0017177 0 ustar 00root root 0000000 0000000 services:
mysql:
image: percona:5.7
environment:
- MYSQL_ROOT_PASSWORD
ports:
- "3306:3306"
command:
[--character-set-server=utf8mb4, --collation-server=utf8mb4_unicode_ci]
sqlserver:
image: mcr.microsoft.com/mssql/server:2022-latest
environment:
ACCEPT_EULA: Y
MSSQL_SA_PASSWORD: Str0ngP4ssw0rd
ports:
- "1433:1433"
next-jdbc-1.3.955/resources/ 0000775 0000000 0000000 00000000000 14700603111 0015557 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/resources/META-INF/ 0000775 0000000 0000000 00000000000 14700603111 0016717 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/resources/META-INF/native-image/ 0000775 0000000 0000000 00000000000 14700603111 0021265 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/resources/META-INF/native-image/io.github.seancorfield/ 0000775 0000000 0000000 00000000000 14700603111 0025612 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/resources/META-INF/native-image/io.github.seancorfield/next.jdbc/ 0000775 0000000 0000000 00000000000 14700603111 0027471 5 ustar 00root root 0000000 0000000 native-image.properties 0000664 0000000 0000000 00000000167 14700603111 0034102 0 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/resources/META-INF/native-image/io.github.seancorfield/next.jdbc Args=--initialize-at-build-time=clojure,next,camel_snake_kebab \
--initialize-at-build-time=java.sql.SQLException
next-jdbc-1.3.955/resources/clj-kondo.exports/ 0000775 0000000 0000000 00000000000 14700603111 0021142 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/resources/clj-kondo.exports/com.github.seancorfield/ 0000775 0000000 0000000 00000000000 14700603111 0025636 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/resources/clj-kondo.exports/com.github.seancorfield/next.jdbc/ 0000775 0000000 0000000 00000000000 14700603111 0027515 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/resources/clj-kondo.exports/com.github.seancorfield/next.jdbc/config.edn 0000664 0000000 0000000 00000000534 14700603111 0031454 0 ustar 00root root 0000000 0000000 {:hooks
{:analyze-call
{next.jdbc/with-transaction
hooks.com.github.seancorfield.next-jdbc/with-transaction
next.jdbc/with-transaction+options
hooks.com.github.seancorfield.next-jdbc/with-transaction+options}}
:lint-as {next.jdbc/on-connection clojure.core/with-open
next.jdbc/on-connection+options clojure.core/with-open}}
next-jdbc-1.3.955/resources/clj-kondo.exports/com.github.seancorfield/next.jdbc/hooks/ 0000775 0000000 0000000 00000000000 14700603111 0030640 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/resources/clj-kondo.exports/com.github.seancorfield/next.jdbc/hooks/com/ 0000775 0000000 0000000 00000000000 14700603111 0031416 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/resources/clj-kondo.exports/com.github.seancorfield/next.jdbc/hooks/com/github/ 0000775 0000000 0000000 00000000000 14700603111 0032700 5 ustar 00root root 0000000 0000000 seancorfield/ 0000775 0000000 0000000 00000000000 14700603111 0035257 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/resources/clj-kondo.exports/com.github.seancorfield/next.jdbc/hooks/com/github next_jdbc.clj_kondo 0000664 0000000 0000000 00000002326 14700603111 0041106 0 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/resources/clj-kondo.exports/com.github.seancorfield/next.jdbc/hooks/com/github/seancorfield (ns hooks.com.github.seancorfield.next-jdbc
(:require [clj-kondo.hooks-api :as api]))
(defn with-transaction
  "clj-kondo hook for next.jdbc/with-transaction: rewrite
  (with-transaction [tx expr opts] body...) as
  (let [tx expr] opts body...) so the binding is analyzed correctly.

  Throws if the binding vector lacks a symbol and an init expression."
  [{:keys [:node]}]
  (let [args         (rest (:children node))
        binding-form (first args)
        body-forms   (next args)
        bindings     (:children binding-form)
        tx-sym       (nth bindings 0 nil)
        init-expr    (nth bindings 1 nil)
        opts-expr    (nth bindings 2 nil)]
    (when-not (and tx-sym init-expr)
      (throw (ex-info "No sym and val provided" {})))
    ;; Build: (let [tx-sym init-expr] opts-expr body...)
    {:node (api/list-node
            (list* (api/token-node 'let)
                   (api/vector-node [tx-sym init-expr])
                   opts-expr
                   body-forms))}))
(defn with-transaction+options
  "clj-kondo hook for next.jdbc/with-transaction+options: rewrite
  (with-transaction+options [tx expr opts] body...) as
  (let [tx expr] opts body...) for analysis purposes.

  Throws if the binding vector lacks a symbol and an init expression."
  [{:keys [:node]}]
  (let [children (:children node)
        [binding-form & body-forms] (rest children)
        [tx-sym init-expr opts-expr] (:children binding-form)]
    (if (and tx-sym init-expr)
      ;; Build: (let [tx-sym init-expr] opts-expr body...)
      {:node (api/list-node
              (list* (api/token-node 'let)
                     (api/vector-node [tx-sym init-expr])
                     opts-expr
                     body-forms))}
      (throw (ex-info "No sym and val provided" {})))))
next-jdbc-1.3.955/run-tests.clj 0000775 0000000 0000000 00000001427 14700603111 0016212 0 ustar 00root root 0000000 0000000 #!/usr/bin/env bb
(require '[babashka.process :as p])
(defn- run-tests
  "Run the test suite via `clojure -X:test`, optionally under the
  version alias `v` (e.g. \"1.10\"), with extra environment vars `env`.
  Terminates the whole process with the shell's exit code on failure."
  [env v]
  (when v
    (println "\nTesting Clojure" v))
  (let [x-arg  (str "-X" (when v (str ":" v)) ":test")
        result (p/shell {:extra-env env} "clojure" x-arg)
        code   (:exit result)]
    (when-not (zero? code)
      (System/exit code))))
;; Script entry: parse command-line flags ("maria" enables MariaDB tests,
;; "all" runs against every supported Clojure version) and invoke the
;; test runner once per version.
(let [flags    (set *command-line-args*)
      base-env {"NEXT_JDBC_TEST_MSSQL" "yes"
                "NEXT_JDBC_TEST_MYSQL" "yes"
                "MSSQL_SA_PASSWORD" "Str0ngP4ssw0rd"}
      env      (if (flags "maria")
                 (assoc base-env "NEXT_JDBC_TEST_MARIADB" "yes")
                 base-env)
      versions (if (flags "all")
                 ["1.10" "1.11" "1.12"]
                 [nil])] ; nil = whatever Clojure version the project defaults to
  (doseq [v versions]
    (run-tests env v)))
next-jdbc-1.3.955/src/ 0000775 0000000 0000000 00000000000 14700603111 0014334 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/src/next/ 0000775 0000000 0000000 00000000000 14700603111 0015312 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/src/next/jdbc.clj 0000664 0000000 0000000 00000062525 14700603111 0016720 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2018-2023 Sean Corfield, all rights reserved
(ns next.jdbc
"The public API of the next generation java.jdbc library.
The basic building blocks are the `java.sql`/`javax.sql` classes:
* `DataSource` -- something to get connections from,
* `Connection` -- an active connection to the database,
* `PreparedStatement` -- SQL and parameters combined, from a connection,
and the following functions and a macro:
* `get-datasource` -- given a hash map describing a database or a JDBC
connection string, construct a `javax.sql.DataSource` and return it,
* `get-connection` -- given a connectable, obtain a new `java.sql.Connection`
from it and return that,
* `plan` -- given a connectable and SQL + parameters or a statement,
return a reducible that, when reduced (with an initial value) will
execute the SQL and consume the `ResultSet` produced,
* `execute!` -- given a connectable and SQL + parameters or a statement,
execute the SQL, consume the `ResultSet` produced, and return a vector
of hash maps representing the rows (@1); this can be datafied to allow
navigation of foreign keys into other tables (either by convention or
via a schema definition),
* `execute-one!` -- given a connectable and SQL + parameters or a statement,
execute the SQL, consume the first row of the `ResultSet` produced, and
return a hash map representing that row; this can be datafied to allow
navigation of foreign keys into other tables (either by convention or
via a schema definition),
* `execute-batch!` -- given a `PreparedStatement` and groups of parameters,
execute the statement in batch mode (via `.executeBatch`); given a
connectable, a SQL string, and groups of parameters, create a new
`PreparedStatement` from the SQL and execute it in batch mode.
* `prepare` -- given a `Connection` and SQL + parameters, construct a new
`PreparedStatement`; in general this should be used with `with-open`,
* `transact` -- the functional implementation of `with-transaction`,
* `with-transaction` -- execute a series of SQL operations within a transaction.
@1 result sets are built, by default, as vectors of hash maps, containing
qualified keywords as column names, but the row builder and result set
builder machinery is open and alternatives are provided to produce
unqualified keywords as column names, and to produce a vector the
column names followed by vectors of column values for each row, and
lower-case variants of each.
The following options are supported wherever a `Connection` is created:
* `:auto-commit` -- either `true` or `false`,
* `:read-only` -- either `true` or `false`,
* `:connection` -- a hash map of camelCase properties to set, via reflection,
on the `Connection` object after it is created.
The following options are supported wherever a `Statement` or
`PreparedStatement` is created:
* `:concurrency` -- `:read-only`, `:updatable`,
* `:cursors` -- `:close`, `:hold`
* `:fetch-size` -- the fetch size value,
* `:max-rows` -- the maximum number of rows to return,
* `:result-type` -- `:forward-only`, `:scroll-insensitive`, `:scroll-sensitive`,
* `:timeout` -- the query timeout,
* `:statement` -- a hash map of camelCase properties to set, via reflection,
on the `Statement` or `PreparedStatement` object after it is created.
In addition, wherever a `PreparedStatement` is created, you may specify:
* `:return-keys` -- either `true` or a vector of key names to return."
(:require [camel-snake-kebab.core :refer [->kebab-case ->snake_case]]
[next.jdbc.connection]
[next.jdbc.default-options :as opts]
[next.jdbc.prepare :as prepare]
[next.jdbc.protocols :as p]
[next.jdbc.result-set :as rs]
[next.jdbc.sql-logging :as logger]
[next.jdbc.transaction :as tx])
(:import (java.sql PreparedStatement)))
(set! *warn-on-reflection* true)
(defn get-datasource
"Given some sort of specification of a database, return a `DataSource`.
A specification can be a JDBC URL string (which is passed to the JDBC
driver as-is), or a hash map.
For the hash map, there are two formats accepted:
In the first format, these keys are required:
* `:dbtype` -- a string indicating the type of the database
* `:dbname` -- a string indicating the name of the database to be used
The following optional keys are commonly used:
* `:user` -- the username to authenticate with
* `:password` -- the password to authenticate with
* `:host` -- the hostname or IP address of the database (default: `127.0.0.1`);
can be `:none` which means the host/port segment of the JDBC URL should
be omitted entirely (for 'local' databases)
* `:port` -- the port for the database connection (the default is database-
specific -- see below); can be `:none` which means the port segment of
the JDBC URL should be omitted entirely
* `:classname` -- if you need to override the default for the `:dbtype`
(or you want to use a database that next.jdbc does not know about!)
The following optional keys can be used to control how JDBC URLs are
assembled. This may be needed for `:dbtype` values that `next.jdbc`
does not recognize:
* `:dbname-separator` -- override the `/` or `:` that normally precedes
the database name in the JDBC URL
* `:host-prefix` -- override the `//` that normally precedes the IP
address or hostname in the JDBC URL
* `:property-separator` -- an optional string that can be used to override
the separators used in `jdbc-url` for the properties (after the initial
JDBC URL portion); by default `?` and `&` are used to build JDBC URLs
with properties; for SQL Server drivers (both MS and jTDS)
`:property-separator \";\"` is used
In the second format, this key is required:
* `:jdbcUrl` -- a JDBC URL string
Any additional options provided will be passed to the JDBC driver's
`.getConnection` call as a `java.util.Properties` structure.
Database types supported (for `:dbtype`), and their defaults:
* `derby` -- `org.apache.derby.jdbc.EmbeddedDriver` -- also pass `:create true`
if you want the database to be automatically created
* `duckdb` -- `org.duckdb.DuckDBDriver` -- embedded database
* `h2` -- `org.h2.Driver` -- for an on-disk database
* `h2:mem` -- `org.h2.Driver` -- for an in-memory database
* `hsqldb`, `hsql` -- `org.hsqldb.jdbcDriver`
* `jtds:sqlserver`, `jtds` -- `net.sourceforge.jtds.jdbc.Driver` -- `1433`
* `mariadb` -- `org.mariadb.jdbc.Driver` -- `3306`
* `mysql` -- `com.mysql.cj.jdbc.Driver`, `com.mysql.jdbc.Driver` -- `3306`
* `oracle:oci` -- `oracle.jdbc.OracleDriver` -- `1521`
* `oracle:thin`, `oracle` -- `oracle.jdbc.OracleDriver` -- `1521`
* `oracle:sid` -- `oracle.jdbc.OracleDriver` -- `1521` -- uses the legacy `:`
separator for the database name but otherwise behaves like `oracle:thin`
* `postgresql`, `postgres` -- `org.postgresql.Driver` -- `5432`
* `pgsql` -- `com.impossibl.postgres.jdbc.PGDriver` -- no default port
* `redshift` -- `com.amazon.redshift.jdbc.Driver` -- no default port
* `sqlite` -- `org.sqlite.JDBC`
* `sqlserver`, `mssql` -- `com.microsoft.sqlserver.jdbc.SQLServerDriver` -- `1433`
* `timesten:client` -- `com.timesten.jdbc.TimesTenClientDriver`
* `timesten:direct` -- `com.timesten.jdbc.TimesTenDriver`
For more details about `:dbtype` and `:classname` values, see:
https://cljdoc.org/d/com.github.seancorfield/next.jdbc/CURRENT/api/next.jdbc.connection#dbtypes"
^javax.sql.DataSource
[spec]
(p/get-datasource spec))
(defn get-connection
"Given some sort of specification of a database, return a new `Connection`.
In general, this should be used via `with-open`:
```clojure
(with-open [con (get-connection spec opts)]
(run-some-ops con))
```
If you call `get-connection` on a `DataSource`, it just calls `.getConnection`
and applies the `:auto-commit` and/or `:read-only` options, if provided.
If you call `get-connection` on anything else, it will call `get-datasource`
first to try to get a `DataSource`, and then call `get-connection` on that.
If you want different per-connection username/password values, you can
either put `:user` and `:password` into the `opts` hash map or pass them
as positional arguments."
(^java.sql.Connection
[spec]
(p/get-connection spec {}))
(^java.sql.Connection
[spec opts]
(p/get-connection spec opts))
(^java.sql.Connection
[spec user password]
(p/get-connection spec {:user user :password password}))
(^java.sql.Connection
[spec user password opts]
(p/get-connection spec (assoc opts :user user :password password))))
(defn prepare
"Given a connection to a database, and a vector containing SQL and any
parameters it needs, return a new `PreparedStatement`.
In general, this should be used via `with-open`:
```clojure
(with-open [stmt (prepare spec sql-params opts)]
(run-some-ops stmt))
```
See the list of options above (in the namespace docstring) for what can
be passed to prepare."
(^java.sql.PreparedStatement
[connection sql-params]
(p/prepare connection sql-params {}))
(^java.sql.PreparedStatement
[connection sql-params opts]
(p/prepare connection sql-params opts)))
(defn plan
"General SQL execution function (for working with result sets).
Returns a reducible that, when reduced (with an initial value), runs the
SQL and yields the result. `plan` returns an `IReduceInit` object so you
must provide an initial value when calling `reduce` on it.
The reducible is also foldable (in the `clojure.core.reducers` sense) but
see the **Tips & Tricks** section of the documentation for some important
caveats about that.
Can be called on a `PreparedStatement`, a `Connection`, or something that can
produce a `Connection` via a `DataSource`.
Your reducing function can read columns by name (string or simple keyword)
from each row of the underlying `ResultSet` without realizing the row as
a Clojure hash map. `select-keys` can also be used without realizing the row.
Operations that imply an actual Clojure data structure (such as `assoc`,
`dissoc`, `seq`, `keys`, `vals`, etc) will realize the row into a hash map
using the supplied `:builder-fn` (or `as-maps` by default).
If your reducing function needs to produce a hash map without calling a
function that implicitly realizes the row, you can call:
`(next.jdbc.result-set/datafiable-row row connectable opts)`
passing in the current row (passed to the reducing function), a `connectable`,
and an `opts` hash map. These can be the same values that you passed to `plan`
(or they can be different, depending on how you want the row to be built,
and how you want any subsequent lazy navigation to be handled)."
(^clojure.lang.IReduceInit
[stmt]
(p/-execute stmt [] {}))
(^clojure.lang.IReduceInit
[connectable sql-params]
(p/-execute connectable sql-params
{:next.jdbc/sql-params sql-params}))
(^clojure.lang.IReduceInit
[connectable sql-params opts]
(p/-execute connectable sql-params
(assoc opts :next.jdbc/sql-params sql-params))))
(defn execute!
"General SQL execution function.
Returns a fully-realized result set. When `:multi-rs true` is provided, will
return multiple result sets, as a vector of result sets. Each result set is
a vector of hash maps, by default, but can be controlled by the `:builder-fn`
option.
Can be called on a `PreparedStatement`, a `Connection`, or something that can
produce a `Connection` via a `DataSource`."
([stmt]
(p/-execute-all stmt [] {}))
([connectable sql-params]
(p/-execute-all connectable sql-params
{:next.jdbc/sql-params sql-params}))
([connectable sql-params opts]
(p/-execute-all connectable sql-params
(assoc opts :next.jdbc/sql-params sql-params))))
(defn execute-one!
"General SQL execution function that returns just the first row of a result.
For any DDL or SQL statement that will return just an update count, this is
the preferred function to use.
Can be called on a `PreparedStatement`, a `Connection`, or something that can
produce a `Connection` via a `DataSource`.
Note: although this only returns the first row of a result set, it does not
place any limit on the result of the SQL executed."
([stmt]
(p/-execute-one stmt [] {}))
([connectable sql-params]
(p/-execute-one connectable sql-params
{:next.jdbc/sql-params sql-params}))
([connectable sql-params opts]
(p/-execute-one connectable sql-params
(assoc opts :next.jdbc/sql-params sql-params))))
(defn execute-batch!
"Given a `PreparedStatement` and a vector containing parameter groups,
i.e., a vector of vector of parameters, use `.addBatch` to add each group
of parameters to the prepared statement (via `set-parameters`) and then
call `.executeBatch`. A vector of update counts is returned.
An options hash map may also be provided, containing `:batch-size` which
determines how to partition the parameter groups for submission to the
database. If omitted, all groups will be submitted as a single command.
If you expect the update counts to be larger than `Integer/MAX_VALUE`,
you can specify `:large true` and `.executeLargeBatch` will be called
instead.
Alternatively, given a connectable, a SQL string, a vector containing
parameter groups, and an options hash map, create a new `PreparedStatement`
(after possibly creating a new `Connection`), and execute the SQL with
the specified parameter groups. That new `PreparedStatement` (and the
new `Connection`, if created) will be closed automatically after use.
By default, returns a Clojure vector of update counts. Some databases
allow batch statements to also return generated keys and you can attempt that
if you ensure the `PreparedStatement` is created with `:return-keys true`
and you also provide `:return-generated-keys true` in the options passed
to `execute-batch!`. Some databases will only return one generated key
per batch, some return all the generated keys, some will throw an exception.
If that is supported, `execute-batch!` will return a vector of hash maps
containing the generated keys as fully-realized, datafiable result sets,
whose content is database-dependent.
May throw `java.sql.BatchUpdateException` if any part of the batch fails.
You may be able to call `.getUpdateCounts` on that exception object to
get more information about which parts succeeded and which failed.
For additional caveats and database-specific options you may need, see:
https://cljdoc.org/d/com.github.seancorfield/next.jdbc/CURRENT/doc/getting-started/prepared-statements#caveats
Not all databases support batch execution."
([ps param-groups]
(execute-batch! ps param-groups {}))
([^PreparedStatement ps param-groups opts]
(let [params (if-let [n (:batch-size opts)]
(if (and (number? n) (pos? n))
(partition-all n param-groups)
(throw (IllegalArgumentException.
":batch-size must be positive")))
[param-groups])]
(into []
(mapcat (fn [group]
(run! #(.addBatch (prepare/set-parameters ps %)) group)
(let [result (if (:large opts)
(.executeLargeBatch ps)
(.executeBatch ps))]
(if (:return-generated-keys opts)
(rs/datafiable-result-set (.getGeneratedKeys ps)
(p/get-connection ps {})
opts)
result))))
params)))
([connectable sql param-groups opts]
(if (or (instance? java.sql.Connection connectable)
(and (satisfies? p/Connectable connectable)
(instance? java.sql.Connection (:connectable connectable))))
(with-open [ps (prepare connectable [sql] opts)]
(execute-batch! ps param-groups opts))
(with-open [con (get-connection connectable)]
(execute-batch! con sql param-groups opts)))))
(defmacro on-connection
"Given a connectable object, gets a connection and binds it to `sym`,
then executes the `body` in that context.
This allows you to write generic, `Connection`-based code without
needing to know the exact type of an incoming datasource:
```clojure
(on-connection [conn datasource]
(let [metadata (.getMetadata conn)
catalog (.getCatalog conn)]
...))
```
If passed a `Connection` or a `Connectable` that wraps a `Connection`,
then that `Connection` is used as-is.
Otherwise, creates a new `Connection` object from the connectable,
executes the body, and automatically closes it for you."
[[sym connectable] & body]
(let [con-sym (vary-meta sym assoc :tag 'java.sql.Connection)]
`(let [con-obj# ~connectable]
(cond (instance? java.sql.Connection con-obj#)
((^{:once true} fn* [~con-sym] ~@body) con-obj#)
(and (satisfies? p/Connectable con-obj#)
(instance? java.sql.Connection (:connectable con-obj#)))
((^{:once true} fn* [~con-sym] ~@body) (:connectable con-obj#))
:else
(with-open [con# (get-connection con-obj#)]
((^{:once true} fn* [~con-sym] ~@body) con#))))))
(defmacro on-connection+options
"Given a connectable object, assumed to be wrapped with options, gets
a connection, rewraps it with those options, and binds it to `sym`,
then executes the `body` in that context.
This allows you to write generic, **wrapped** connectable code without
needing to know the exact type of an incoming datasource:
```clojure
(on-connection+options [conn datasource]
(execute! conn some-insert-sql)
(execute! conn some-update-sql))
```
If passed a `Connection` then that `Connection` is used as-is.
If passed a `Connectable` that wraps a `Connection`, then that
`Connectable` is used as-is.
Otherwise, creates a new `Connection` object from the connectable,
wraps that with options, executes the body, and automatically closes
the new `Connection` for you.
Note: the bound `sym` will be a **wrapped** connectable and not a plain
Java object, so you cannot call JDBC methods directly on it like you can
with `on-connection`."
[[sym connectable] & body]
`(let [con-obj# ~connectable]
(cond (instance? java.sql.Connection con-obj#)
((^{:once true} fn* [~sym] ~@body) con-obj#)
(and (satisfies? p/Connectable con-obj#)
(instance? java.sql.Connection (:connectable con-obj#)))
((^{:once true} fn* [~sym] ~@body) con-obj#)
:else
(with-open [con# (get-connection con-obj#)]
((^{:once true} fn* [~sym] ~@body)
(with-options con# (:options con-obj# {})))))))
(defn transact
"Given a transactable object and a function (taking a `Connection`),
execute the function over the connection in a transactional manner.
See `with-transaction` for supported options."
([transactable f]
(p/-transact transactable f {}))
([transactable f opts]
(p/-transact transactable f opts)))
(defmacro with-transaction
"Given a transactable object, gets a connection and binds it to `sym`,
then executes the `body` in that context, committing any changes if the body
completes successfully, otherwise rolling back any changes made.
Like `with-open`, if `with-transaction` creates a new `Connection` object,
it will automatically close it for you.
If you are working with default options via `with-options`, you might want
to use `with-transaction+options` instead.
The options map supports:
* `:isolation` -- `:none`, `:read-committed`, `:read-uncommitted`,
`:repeatable-read`, `:serializable`,
* `:read-only` -- `true` / `false` (`true` will make the `Connection` readonly),
* `:rollback-only` -- `true` / `false` (`true` will make the transaction
rollback, even if it would otherwise succeed)."
[[sym transactable opts] & body]
(let [con (vary-meta sym assoc :tag 'java.sql.Connection)]
`(transact ~transactable (^{:once true} fn* [~con] ~@body) ~(or opts {}))))
(defn active-tx?
"Returns true if `next.jdbc` has a currently active transaction in the
current thread, else false.
Note: transactions are a convention of operations on a `Connection` so
this predicate only reflects `next.jdbc/transact` and `next.jdbc/with-transaction`
operations -- it does not reflect any other operations on a `Connection`,
performed via JDBC interop directly."
[]
@#'tx/*active-tx*)
(defn with-options
"Given a connectable/transactable object and a set of (default) options
that should be used on all operations on that object, return a new
wrapper object that can be used in its place.
Bear in mind that `get-datasource`, `get-connection`, and `with-transaction`
return plain Java objects, so if you call any of those on this wrapped
object, you'll need to re-wrap the Java object `with-options` again. See
the Datasources, Connections & Transactions section of Getting Started for
more details, and some examples of use with these functions.
`with-transaction+options` exists to automatically rewrap a `Connection`
with the options from a `with-options` wrapper."
[connectable opts]
(let [c (:connectable connectable)
o (:options connectable)]
(if (and c o)
(opts/->DefaultOptions c (merge o opts))
(opts/->DefaultOptions connectable opts))))
(defmacro with-transaction+options
"Given a transactable object, assumed to be wrapped with options, gets a
connection, rewraps it with those options, and binds it to `sym`, then
executes the `body` in that context, committing any changes if the body
completes successfully, otherwise rolling back any changes made.
Like `with-open`, if `with-transaction+options` creates a new `Connection`
object, it will automatically close it for you.
Note: the bound `sym` will be a **wrapped** connectable and not a plain
Java object, so you cannot call JDBC methods directly on it like you can
with `with-transaction`.
The options map supports:
* `:isolation` -- `:none`, `:read-committed`, `:read-uncommitted`,
`:repeatable-read`, `:serializable`,
* `:read-only` -- `true` / `false` (`true` will make the `Connection` readonly),
* `:rollback-only` -- `true` / `false` (`true` will make the transaction
rollback, even if it would otherwise succeed)."
[[sym transactable opts] & body]
`(let [tx# ~transactable]
(transact tx#
(^{:once true} fn*
[con#] ; this is the unwrapped java.sql.connection
(let [~sym (with-options con# (:options tx# {}))]
~@body))
~(or opts {}))))
(defn with-logging
"Given a connectable/transactable object and a sql/params logging
function and an optional result logging function that should be used
on all operations on that object, return a new wrapper object that can
be used in its place.
The sql/params logging function will be called with two arguments:
* a symbol indicating which operation is being performed:
* `next.jdbc/plan`, `next.jdbc/execute-one!`, `next.jdbc/execute!`,
or `next.jdbc/prepare`
* the vector containing the SQL string and its parameters
Whatever the sql/params logging function returns will be passed as a
`state` argument to the optional result logging function. This means you can
use this mechanism to provide some timing information, since your sql/params
logging function can return the current system time, and your result logging
function can then calculate the elapsed time. There is an example of this in
the Naive Logging with Timing section of Getting Started.
The result logging function, if provided, will be called with the
same symbol passed to the sql/params logging function, the `state`
returned by the sql/params logging function, and either the result of
the `execute!` or `execute-one!` call or an exception if the call
failed. The result logging function is not called for the `plan`
or `prepare` call (since they do not produce result sets directly).
Bear in mind that `get-datasource`, `get-connection`, and `with-transaction`
return plain Java objects, so if you call any of those on this wrapped
object, you'll need to re-wrap the Java object `with-logging` again. See
the Datasources, Connections & Transactions section of Getting Started for
more details, and some examples of use with these functions."
[connectable sql-logger & [result-logger]]
(logger/->SQLLogging connectable sql-logger result-logger (:options connectable)))
(def snake-kebab-opts
"A hash map of options that will convert Clojure identifiers to
snake_case SQL entities (`:table-fn`, `:column-fn`), and will convert
SQL entities to qualified kebab-case Clojure identifiers (`:builder-fn`)."
{:column-fn ->snake_case :table-fn ->snake_case
:label-fn ->kebab-case :qualifier-fn ->kebab-case
:builder-fn rs/as-kebab-maps})
(def unqualified-snake-kebab-opts
"A hash map of options that will convert Clojure identifiers to
snake_case SQL entities (`:table-fn`, `:column-fn`), and will convert
SQL entities to unqualified kebab-case Clojure identifiers (`:builder-fn`)."
{:column-fn ->snake_case :table-fn ->snake_case
:label-fn ->kebab-case :qualifier-fn ->kebab-case
:builder-fn rs/as-unqualified-kebab-maps})
next-jdbc-1.3.955/src/next/jdbc/ 0000775 0000000 0000000 00000000000 14700603111 0016214 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/src/next/jdbc/connection.clj 0000664 0000000 0000000 00000053121 14700603111 0021047 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2018-2024 Sean Corfield, all rights reserved
(ns next.jdbc.connection
"Standard implementations of `get-datasource` and `get-connection`.
Also provides `dbtypes` as a map of all known database types, and
the `->pool` and `component` functions for creating pooled datasource
objects."
(:require [clojure.java.data :as j]
[clojure.string :as str]
[next.jdbc.protocols :as p])
(:import (java.sql Connection DriverManager)
(javax.sql DataSource)
(java.util Properties)))
(set! *warn-on-reflection* true)
(def dbtypes
"A map of all known database types (including aliases) to the class name(s)
and port that `next.jdbc` supports out of the box. For databases that have
non-standard prefixes for the `:dbname` and/or `:host` values in the JDBC
string, this table includes `:dbname-separator` and/or `:host-prefix`. The
default prefix for `:dbname` is either `/` or `:` and for `:host` it is `//`.
For local databases, with no `:host`/`:port` segment in their JDBC URL, a
value of `:none` is provided for `:host` in this table. In addition,
`:property-separator` can specify how you build the JDBC URL.
For known database types, you can use `:dbtype` (and omit `:classname`).
If you want to use a database that is not in this list, you can specify
a new `:dbtype` along with the class name of the JDBC driver in `:classname`.
You will also need to specify `:port`. For example:
`{:dbtype \"acme\" :classname \"com.acme.JdbcDriver\" ...}`
The value of `:dbtype` should be the string that the driver is associated
with in the JDBC URL, i.e., the value that comes between the `jdbc:`
prefix and the `://...` part. In the above example, the JDBC URL
that would be generated would be `jdbc:acme://:/`.
If you want `next.jdbc` to omit the host/port part of the URL, specify
`:host :none`, which would produce a URL like: `jdbc:acme:`,
which allows you to work with local databases (or drivers that do not
need host/port information).
The default prefix for the host name (or IP address) is `//`. You
can override this via the `:host-prefix` option.
The default separator between the host/port and the database name is `/`.
The default separator between the subprotocol and the database name,
for local databases with no host/port, is `:`. You can override this
via the `:dbname-separator` option.
JDBC drivers are not provided by `next.jdbc` -- you need to specify the
driver(s) you need as additional dependencies in your project. For
example:
`[com.acme/jdbc \"1.2.3\"] ; lein/boot`
or:
`com.acme/jdbc {:mvn/version \"1.2.3\"} ; CLI/deps.edn`
Note: the `:classname` value can be a string or a vector of strings. If
a vector of strings is provided, an attempt will be made to load each
named class in order, until one succeeds. This allows for a given `:dbtype`
to be used with different versions of a JDBC driver, if the class name
has changed over time (such as with MySQL)."
{"derby" {:classname "org.apache.derby.jdbc.EmbeddedDriver"
:host :none}
"duckdb" {:classname "org.duckdb.DuckDBDriver"
:host :none}
"h2" {:classname "org.h2.Driver"
:property-separator ";"
:host :none}
"h2:mem" {:classname "org.h2.Driver"
:property-separator ";"}
"hsql" {:classname "org.hsqldb.jdbcDriver"
:alias-for "hsqldb"
:host :none}
"hsqldb" {:classname "org.hsqldb.jdbcDriver"
:host :none}
"jtds" {:classname "net.sourceforge.jtds.jdbc.Driver"
:alias-for "jtds:sqlserver"
:property-separator ";"
:port 1433}
"jtds:sqlserver" {:classname "net.sourceforge.jtds.jdbc.Driver"
:property-separator ";"
:port 1433}
"mariadb" {:classname "org.mariadb.jdbc.Driver"
:port 3306}
"mssql" {:classname "com.microsoft.sqlserver.jdbc.SQLServerDriver"
:alias-for "sqlserver"
:dbname-separator ";DATABASENAME="
:property-separator ";"
:port 1433}
"mysql" {:classname ["com.mysql.cj.jdbc.Driver"
"com.mysql.jdbc.Driver"]
:port 3306}
"oracle" {:classname "oracle.jdbc.OracleDriver"
:alias-for "oracle:thin"
:host-prefix "@"
:port 1521}
"oracle:oci" {:classname "oracle.jdbc.OracleDriver"
:host-prefix "@"
:port 1521}
"oracle:sid" {:classname "oracle.jdbc.OracleDriver"
:alias-for "oracle:thin"
:dbname-separator ":"
:host-prefix "@"
:port 1521}
"oracle:thin" {:classname "oracle.jdbc.OracleDriver"
:host-prefix "@"
:port 1521}
"postgres" {:classname "org.postgresql.Driver"
:alias-for "postgresql"
:port 5432}
"postgresql" {:classname "org.postgresql.Driver"
:port 5432}
"pgsql" {:classname "com.impossibl.postgres.jdbc.PGDriver"}
"redshift" {:classname "com.amazon.redshift.jdbc.Driver"}
"sqlite" {:classname "org.sqlite.JDBC"
:host :none}
"sqlserver" {:classname "com.microsoft.sqlserver.jdbc.SQLServerDriver"
:dbname-separator ";DATABASENAME="
:property-separator ";"
:port 1433}
"timesten:client" {:classname "com.timesten.jdbc.TimesTenClientDriver"
:dbname-separator ":dsn="
:host :none}
"timesten:direct" {:classname "com.timesten.jdbc.TimesTenDriver"
:dbname-separator ":dsn="
:host :none}})
(def ^:private driver-cache
"An optimization for repeated calls to get-datasource, or for get-connection
called on a db-spec hash map, so that we only try to load the classes once."
(atom {}))
(defn- spec->url+etc
"Given a database spec, return a JDBC URL and a map of any additional options.
As a special case, the database spec can contain jdbcUrl (just like ->pool),
in which case it will return that URL as-is and a map of any other options."
[{:keys [dbtype dbname host port classname
dbname-separator host-prefix property-separator
jdbcUrl]
:as db-spec}]
(let [etc (dissoc db-spec
:dbtype :dbname :host :port :classname
:dbname-separator :host-prefix :property-separator
:jdbcUrl)]
(if jdbcUrl
[jdbcUrl etc]
(let [;; allow aliases for dbtype
subprotocol (-> dbtype dbtypes :alias-for (or dbtype))
host (or host (-> dbtype dbtypes :host) "127.0.0.1")
port (or port (-> dbtype dbtypes :port))
db-sep (or dbname-separator (-> dbtype dbtypes :dbname-separator (or "/")))
local-sep (or dbname-separator (-> dbtype dbtypes :dbname-separator (or ":")))
url (cond (#{"h2"} subprotocol)
(str "jdbc:" subprotocol local-sep
(if (re-find #"^([A-Za-z]:)?[\./\\]" dbname)
;; DB name starts with relative or absolute path
dbname
;; otherwise make it local
(str "./" dbname)))
(#{"h2:mem"} subprotocol)
(str "jdbc:" subprotocol local-sep dbname ";DB_CLOSE_DELAY=-1")
(= :none host)
(str "jdbc:" subprotocol local-sep dbname)
:else
(str "jdbc:" subprotocol ":"
(or host-prefix (-> dbtype dbtypes :host-prefix (or "//")))
host
(when (and port (not= :none port)) (str ":" port))
db-sep dbname))]
;; verify the datasource is loadable
(if-let [class-name (or classname (-> dbtype dbtypes :classname))]
(swap! driver-cache update class-name
#(if % %
(let [;; force DriverManager to be loaded
_ (DriverManager/getLoginTimeout)]
(if (string? class-name)
(clojure.lang.RT/loadClassForName class-name)
(loop [[clazz & more] class-name]
(let [loaded
(try
(clojure.lang.RT/loadClassForName clazz)
(catch Exception e
e))]
(if (instance? Throwable loaded)
(if (seq more)
(recur more)
(throw loaded))
loaded)))))))
(throw (ex-info (str "Unknown dbtype: " dbtype
", and :classname not provided.")
db-spec)))
[url etc (or property-separator
(-> dbtype dbtypes :property-separator))]))))
(defn jdbc-url
"Given a database spec (as a hash map), return a JDBC URL with all the
attributes added to the query string. The result is suitable for use in
calls to `->pool` and `component` as the `:jdbcUrl` key in the parameter
map for the connection pooling library.
This allows you to build a connection-pooled datasource that needs
additional settings that the pooling library does not support, such as
`:serverTimezone`:
```clojure
(def db-spec {:dbtype .. :dbname .. :user .. :password ..
:serverTimezone \"UTC\"})
(def ds (next.jdbc.connection/->pool
HikariCP {:jdbcUrl (next.jdbc.connection/jdbc-url db-spec)
:maximumPoolSize 15}))
```
This also clearly separates the attributes that should be part of the
JDBC URL from the attributes that should be configured on the pool.
Since JDBC drivers can handle URL encoding differently, if you are
trying to pass attributes that might need encoding, you should make
sure they are properly URL-encoded as values in the database spec hash map.
This function does **not** attempt to URL-encode values for you!"
[db-spec]
(let [[url etc ps] (spec->url+etc db-spec)
url-and (or ps (if (str/index-of url "?") "&" "?"))]
(if (seq etc)
(str url url-and (str/join (or ps "&")
(reduce-kv (fn [pairs k v]
(conj pairs (str (name k) "=" v)))
[]
etc)))
url)))
(defn ->pool
  "Given a (connection pooled datasource) class and a database spec, return a
  connection pool object built from that class and the database spec.

  The `clazz` is assumed to provide a `.setJdbcUrl` method (true of both
  HikariCP and c3p0). If you already have a JDBC URL, supply it as `:jdbcUrl`
  in the database spec instead of `:dbtype`, `:dbname`, etc.

  Other keys in the database spec are treated as mixed-case properties whose
  setter methods are invoked on the pool object (just as `:jdbcUrl` maps to
  `.setJdbcUrl`), via `clojure.java.data/to-java`.

  If you need connection URL parameters, it can be easier to construct the
  URL up front with `next.jdbc.connection/jdbc-url`, e.g.,

      (->pool HikariDataSource
              {:jdbcUrl (jdbc-url {:dbtype .. :dbname .. :useSSL false})
               :username .. :password ..})

  Here `:useSSL false` becomes part of the connection string while
  `:username` and `:password` configure the pool itself.

  Note that the result is not type-hinted (because there's no common base
  class or interface that can be assumed). In particular, connection pooled
  datasource objects may need to be closed but they don't necessarily
  implement `java.io.Closeable` (HikariCP does, c3p0 does not)."
  [clazz db-spec]
  ;; either the caller supplied a ready-made JDBC URL, or we build one from
  ;; the spec and fold the leftover attributes back in as pool properties:
  (let [spec (if (:jdbcUrl db-spec)
               db-spec
               (let [[url etc] (spec->url+etc db-spec)]
                 (assoc etc :jdbcUrl url)))]
    (j/to-java clazz spec)))
(defn- attempt-close
  "Reflectively look up a no-argument, void-returning `close` method on the
  class of `obj` and, when one exists, invoke it. Returns `nil`."
  [obj]
  (letfn [(closer? [^java.lang.reflect.Method m]
            (and (= "close" (.getName m))
                 (zero? (alength (.getParameterTypes m)))
                 (= "void" (.getName (.getReturnType m)))))]
    (when-let [^java.lang.reflect.Method close-method
               (some #(when (closer? %) %) (.getMethods (class obj)))]
      ;; no-arg invocation; a void method yields nil
      (.invoke close-method obj (object-array 0)))))
(defn component
  "Takes the same arguments as `->pool` but returns an entity compatible
  with Stuart Sierra's Component: when `com.stuartsierra.component/start`
  is called on it, it builds a connection pooled datasource, and returns
  an entity that can either be invoked as a function with no arguments
  to return that datasource, or can have `com.stuartsierra.component/stop`
  called on it to shutdown the datasource (and return a new startable
  entity).
  If `db-spec` contains `:init-fn`, that is assumed to be a function
  that should be called on the newly-created datasource. This allows for
  modification of (mutable) connection pooled datasource and/or some sort
  of database initialization/setup to be called automatically.
  By default, the datasource is shutdown by calling `.close` on it.
  If the datasource class implements `java.io.Closeable` then a direct,
  type-hinted call to `.close` will be used, with no reflection,
  otherwise Java reflection will be used to find the first `.close`
  method in the datasource class that takes no arguments and returns `void`.
  If neither of those behaviors is appropriate, you may supply a third
  argument to this function -- `close-fn` -- which performs whatever
  action is appropriate to your chosen datasource class."
  ([clazz db-spec]
   ;; default close-fn: direct .close for Closeable classes (e.g., HikariCP),
   ;; reflective attempt-close otherwise (e.g., c3p0):
   (component clazz db-spec #(if (isa? clazz java.io.Closeable)
                               (.close ^java.io.Closeable %)
                               (attempt-close %))))
  ([clazz db-spec close-fn]
   ;; the start/stop behavior lives in metadata (Component supports
   ;; metadata-based lifecycle implementations):
   (with-meta {}
     {'com.stuartsierra.component/start
      (fn [_]
        (let [init-fn (:init-fn db-spec)
              ;; :init-fn is for us, not a pool property -- remove it:
              pool (->pool clazz (dissoc db-spec :init-fn))]
          (when init-fn (init-fn pool))
          ;; started component: a no-arg fn returning the pool, whose
          ;; metadata knows how to stop it and rebuild a startable entity:
          (with-meta (fn ^DataSource [] pool)
            {'com.stuartsierra.component/stop
             (fn [_]
               (close-fn pool)
               (component clazz db-spec close-fn))})))})))
;; Rich comment block: REPL-only examples of building pooled datasources and
;; wrapping them as start/stop-able components. Never evaluated when this
;; namespace is loaded.
(comment
  (require '[com.stuartsierra.component :as component]
           '[next.jdbc.sql :as sql])
  (import '(com.mchange.v2.c3p0 ComboPooledDataSource PooledDataSource)
          '(com.zaxxer.hikari HikariDataSource))
  (isa? PooledDataSource java.io.Closeable) ;=> false
  (isa? HikariDataSource java.io.Closeable) ;=> true
  ;; create a pool with a combination of JDBC URL and username/password:
  (->pool HikariDataSource
          {:jdbcUrl
           (jdbc-url {:dbtype "mysql" :dbname "clojure_test"
                      :useSSL false})
           :username "root" :password (System/getenv "MYSQL_ROOT_PASSWORD")})
  ;; use c3p0 with default reflection-based closing function:
  (def dbc (component ComboPooledDataSource
                      {:dbtype "mysql" :dbname "clojure_test"
                       :user "clojure_test" :password "clojure_test"}))
  ;; use c3p0 with a type-hinted closing function:
  (def dbc (component ComboPooledDataSource
                      {:dbtype "mysql" :dbname "clojure_test"
                       :user "clojure_test" :password "clojure_test"}
                      #(.close ^PooledDataSource %)))
  ;; use HikariCP with default Closeable .close function:
  (def dbc (component HikariDataSource
                      {:dbtype "mysql" :dbname "clojure_test"
                       ;; HikariCP requires :username, not :user
                       :username "clojure_test" :password "clojure_test"}))
  ;; start the chosen datasource component:
  (def ds (component/start dbc))
  ;; invoke datasource component to get the underlying javax.sql.DataSource:
  (sql/get-by-id (ds) :fruit 1)
  ;; stop the component and close the pooled datasource:
  (component/stop ds)
  )
(defn- string->url+etc
  "Treat `s` as a complete JDBC URL: return it paired with an empty options
  map, performing no parsing at all."
  [s]
  (vector s {}))
(defn- as-properties
  "Convert any seq of key/value pairs into a `java.util.Properties` instance,
  stringifying keys via `name` and values via `str`."
  ^Properties [m]
  (reduce (fn [^Properties props [k v]]
            (.setProperty props (name k) (str v))
            props)
          (Properties.)
          m))
(defn uri->db-spec
  "clojure.java.jdbc (and some users out there) considered the URI format
  to be an acceptable JDBC URL, i.e., with credentials embedded in the string,
  rather than as query parameters.
  This function accepts a URI string, optionally prefixed with `jdbc:` and
  returns a db-spec hash map."
  [uri]
  (let [{:keys [scheme userInfo host port path query]}
        ;; strip any leading jdbc: prefix so java.net.URI can parse it:
        (j/from-java (java.net.URI. (str/replace uri #"^jdbc:" "")))
        ;; credentials, when present, are user:password in the userInfo part:
        [user password] (when (seq userInfo) (str/split userInfo #":"))
        ;; remaining query parameters become keyword/value entries in the spec:
        properties (when (seq query)
                     (into {}
                           (map #(let [[k v] (str/split % #"=")]
                                   [(keyword k) v]))
                           (str/split query #"\&")))]
    ;; NOTE(review): java.net.URI reports port -1 when absent; :port is
    ;; assoc'd unconditionally here -- verify downstream handles that.
    (cond-> (assoc properties
                   :dbtype scheme
                   :host host
                   :port port)
      (seq path) (assoc :dbname (subs path 1))
      user (assoc :user user)
      password (assoc :password password))))
(defn- get-driver-connection
  "Common logic for loading the designated JDBC driver class and
  obtaining the appropriate `Connection` object.
  `timeout`, when non-nil, is applied (globally) via
  `DriverManager/setLoginTimeout` before connecting."
  [url timeout etc]
  (when timeout (DriverManager/setLoginTimeout timeout))
  (try
    (DriverManager/getConnection url (as-properties etc))
    (catch Exception e
      ;; fallback: the \"URL\" may actually be a URI with embedded
      ;; credentials (c.j.j style) -- convert it to a db-spec and retry:
      (try
        (let [db-spec (uri->db-spec url)
              [url' etc'] (spec->url+etc db-spec)]
          ;; explicit options (etc) win over anything parsed from the URI:
          (DriverManager/getConnection url' (as-properties (merge etc' etc))))
        (catch Exception _
          ;; if the fallback fails too, throw the original exception
          (throw e))))))
(defn- url+etc->datasource
  "Given a JDBC URL and a map of options, return a `DataSource` that can be
  used to obtain a new database connection.
  The login timeout is held in an atom so a `setLoginTimeout` call on the
  reified `DataSource` affects subsequent connection attempts."
  [[url etc]]
  (let [login-timeout (atom nil)]
    (reify DataSource
      (getConnection [_]
        (get-driver-connection url @login-timeout etc))
      (getConnection [_ username password]
        ;; explicit credentials override any :user/:password in etc:
        (get-driver-connection url @login-timeout
                               (assoc etc
                                      :user username
                                      :password password)))
      ;; JDBC contract: 0 means \"no timeout set\":
      (getLoginTimeout [_] (or @login-timeout 0))
      (setLoginTimeout [_ secs] (reset! login-timeout secs))
      (toString [_] url))))
(defn- make-connection
  "Given a `DataSource` and a map of options, get a connection and update it
  as specified by the options.
  These options are supported:
  * `:auto-commit` -- whether the connection should be set to auto-commit or not;
  without this option, the default is `true` -- connections will auto-commit,
  * `:read-only` -- whether the connection should be set to read-only mode,
  * `:connection` -- a hash map of camelCase properties to set on the connection,
  via reflection, e.g., :autoCommit, :readOnly, :schema...
  In addition, when both `:user` and `:password` are present they are passed
  to the two-argument `.getConnection` call."
  ^Connection
  [^DataSource datasource opts]
  (let [^Connection connection (if (and (:user opts) (:password opts))
                                 (.getConnection datasource
                                                 (:user opts)
                                                 (:password opts))
                                 (.getConnection datasource))]
    ;; fast, specific option handling:
    (when (contains? opts :auto-commit)
      (.setAutoCommit connection (boolean (:auto-commit opts))))
    (when (contains? opts :read-only)
      (.setReadOnly connection (boolean (:read-only opts))))
    ;; slow, general-purpose option handling:
    (when-let [props (:connection opts)]
      (j/set-properties connection props))
    connection))
(extend-protocol p/Sourceable
  clojure.lang.Associative
  (get-datasource [this]
    ;; #207 c.j.j compatibility:
    (if-let [datasource (:datasource this)]
      datasource
      (url+etc->datasource
       (if-let [uri (:connection-uri this)]
         ;; c.j.j-style :connection-uri is used verbatim, unparsed:
         (string->url+etc uri)
         (spec->url+etc this)))))
  javax.sql.DataSource
  ;; a DataSource is already its own datasource:
  (get-datasource [this] this)
  String
  ;; a string is treated as a ready-made JDBC URL:
  (get-datasource [this]
    (url+etc->datasource (string->url+etc this))))
(extend-protocol p/Connectable
  javax.sql.DataSource
  (get-connection [this opts] (make-connection this opts))
  java.sql.PreparedStatement
  ;; note: options are ignored and this should not be closed independently
  ;; of the PreparedStatement to which it belongs: this done to allow
  ;; datafy/nav across a PreparedStatement only...
  (get-connection [this _] (.getConnection this))
  Object
  ;; fall back: anything else (db-spec map, URL string) goes through
  ;; get-datasource first, then the DataSource path above:
  (get-connection [this opts] (p/get-connection (p/get-datasource this) opts)))
next-jdbc-1.3.955/src/next/jdbc/datafy.clj 0000664 0000000 0000000 00000017554 14700603111 0020172 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2020-2021 Sean Corfield, all rights reserved
(ns next.jdbc.datafy
"This namespace provides datafication of several JDBC object types,
all within the `java.sql` package:
* `Connection` -- datafies as a bean.
* `DatabaseMetaData` -- datafies as a bean; six properties
are navigable to produce fully-realized datafiable result sets.
* `ParameterMetaData` -- datafies as a vector of parameter descriptions.
* `ResultSet` -- datafies as a bean; if the `ResultSet` has an associated
`Statement` and that in turn has an associated `Connection` then an
additional key of `:rows` is provided which is a datafied result set,
from `next.jdbc.result-set/datafiable-result-set` with default options.
This is provided as a convenience, purely for datafication of other
JDBC data types -- in normal `next.jdbc` usage, result sets are
datafied under full user control.
* `ResultSetMetaData` -- datafies as a vector of column descriptions.
* `Statement` -- datafies as a bean.
Because different database drivers may throw `SQLException` for various
unimplemented or unavailable properties on objects in various states,
the default behavior is to return those exceptions using the `:qualify`
option for `clojure.java.data/from-java-shallow`, so for a property
`:foo`, if its corresponding getter throws an exception, it would instead
be returned as `:foo/exception`. This behavior can be overridden by
`binding` `next.jdbc.datafy/*datafy-failure*` to any of the other options
supported: `:group`, `:omit`, or `:return`. See the `clojure.java.data`
documentation for more details."
(:require [clojure.core.protocols :as core-p]
[clojure.java.data :as j]
[next.jdbc.result-set :as rs])
(:import (java.sql Connection
DatabaseMetaData
ParameterMetaData
ResultSet ResultSetMetaData
Statement)))
(set! *warn-on-reflection* true)
;; lookup table: column-description key -> fn of (ResultSetMetaData,
;; 1-based column index) that reads that attribute:
(def ^:private column-meta
  {:catalog (fn [^ResultSetMetaData o i] (.getCatalogName o i))
   :class (fn [^ResultSetMetaData o i] (.getColumnClassName o i))
   :display-size (fn [^ResultSetMetaData o i] (.getColumnDisplaySize o i))
   :label (fn [^ResultSetMetaData o i] (.getColumnLabel o i))
   :name (fn [^ResultSetMetaData o i] (.getColumnName o i))
   :precision (fn [^ResultSetMetaData o i] (.getPrecision o i))
   :scale (fn [^ResultSetMetaData o i] (.getScale o i))
   :schema (fn [^ResultSetMetaData o i] (.getSchemaName o i))
   :table (fn [^ResultSetMetaData o i] (.getTableName o i))
   :type (fn [^ResultSetMetaData o i] (.getColumnTypeName o i))
   ;; the is* fields:
   :nullability (fn [^ResultSetMetaData o i]
                  (condp = (.isNullable o i)
                    ResultSetMetaData/columnNoNulls :not-null
                    ResultSetMetaData/columnNullable :null
                    :unknown))
   :auto-increment (fn [^ResultSetMetaData o i] (.isAutoIncrement o i))
   :case-sensitive (fn [^ResultSetMetaData o i] (.isCaseSensitive o i))
   :currency (fn [^ResultSetMetaData o i] (.isCurrency o i))
   :definitely-writable (fn [^ResultSetMetaData o i] (.isDefinitelyWritable o i))
   :read-only (fn [^ResultSetMetaData o i] (.isReadOnly o i))
   :searchable (fn [^ResultSetMetaData o i] (.isSearchable o i))
   :signed (fn [^ResultSetMetaData o i] (.isSigned o i))
   :writable (fn [^ResultSetMetaData o i] (.isWritable o i))})
;; lookup table: parameter-description key -> fn of (ParameterMetaData,
;; 1-based parameter index) that reads that attribute:
(def ^:private parameter-meta
  {:class (fn [^ParameterMetaData o i] (.getParameterClassName o i))
   :mode (fn [^ParameterMetaData o i]
           (condp = (.getParameterMode o i)
             ParameterMetaData/parameterModeIn :in
             ParameterMetaData/parameterModeInOut :in-out
             ParameterMetaData/parameterModeOut :out
             :unknown))
   :precision (fn [^ParameterMetaData o i] (.getPrecision o i))
   :scale (fn [^ParameterMetaData o i] (.getScale o i))
   :type (fn [^ParameterMetaData o i] (.getParameterTypeName o i))
   ;; the is* fields:
   :nullability (fn [^ParameterMetaData o i]
                  (condp = (.isNullable o i)
                    ParameterMetaData/parameterNoNulls :not-null
                    ParameterMetaData/parameterNullable :null
                    :unknown))
   :signed (fn [^ParameterMetaData o i] (.isSigned o i))})
(def ^:dynamic *datafy-failure*
  "How datafication failures should be handled, based on `clojure.java.data`.
  Defaults to `:qualify`; may be bound to `:group`, `:omit`, or `:return`."
  :qualify)
(defn- safe-bean
  "Shallow-bean `o` via `clojure.java.data`, honoring `*datafy-failure*` for
  individual properties whose getters throw.
  If the conversion itself throws, return a small map describing the
  exception (and its cause, if any), with the original throwable attached
  as `:exception` metadata."
  [o opts]
  (try
    (j/from-java-shallow o (assoc opts :add-class true :exceptions *datafy-failure*))
    (catch Throwable t
      ;; dex: summarize a throwable as [type message]
      (let [dex (juxt type (comp str ex-message))
            cause (ex-cause t)]
        (with-meta (cond-> {:exception (dex t)}
                     cause (assoc :cause (dex cause)))
          {:exception t})))))
(defn- datafy-result-set-meta-data
  "Datafy `ResultSetMetaData` as a vector of column-description maps,
  one per column (JDBC columns are numbered from 1)."
  [^ResultSetMetaData this]
  (let [describe (fn [i]
                   (reduce-kv (fn [m k f] (assoc m k (f this i)))
                              {}
                              column-meta))]
    (mapv describe (range 1 (inc (.getColumnCount this))))))
(defn- datafy-parameter-meta-data
  "Datafy `ParameterMetaData` as a vector of parameter-description maps,
  one per parameter (JDBC parameters are numbered from 1)."
  [^ParameterMetaData this]
  (let [describe (fn [i]
                   (reduce-kv (fn [m k f] (assoc m k (f this i)))
                              {}
                              parameter-meta))]
    (mapv describe (range 1 (inc (.getParameterCount this))))))
(extend-protocol core-p/Datafiable
  Connection
  (datafy [this] (safe-bean this {}))
  DatabaseMetaData
  (datafy [this]
    (with-meta (let [data (safe-bean this {})]
                 (cond-> data
                   (not (:exception (meta data)))
                   ;; add an opaque object that nav will "replace"
                   (assoc :all-tables (Object.))))
      ;; nav on the six keys below yields a fully-realized, datafiable
      ;; result set; any other key just navs to its value unchanged:
      {`core-p/nav (fn [_ k v]
                     (condp = k
                       :all-tables
                       (rs/datafiable-result-set (.getTables this nil nil nil nil)
                                                 (.getConnection this)
                                                 {})
                       :catalogs
                       (rs/datafiable-result-set (.getCatalogs this)
                                                 (.getConnection this)
                                                 {})
                       :clientInfoProperties
                       (rs/datafiable-result-set (.getClientInfoProperties this)
                                                 (.getConnection this)
                                                 {})
                       :schemas
                       (rs/datafiable-result-set (.getSchemas this)
                                                 (.getConnection this)
                                                 {})
                       :tableTypes
                       (rs/datafiable-result-set (.getTableTypes this)
                                                 (.getConnection this)
                                                 {})
                       :typeInfo
                       (rs/datafiable-result-set (.getTypeInfo this)
                                                 (.getConnection this)
                                                 {})
                       v))}))
  ParameterMetaData
  (datafy [this] (datafy-parameter-meta-data this))
  ResultSet
  (datafy [this]
    ;; SQLite has a combination ResultSet/Metadata object...
    (if (instance? ResultSetMetaData this)
      (datafy-result-set-meta-data this)
      (let [s (.getStatement this)
            c (when s (.getConnection s))]
        ;; only add :rows when we can reach a Connection via the Statement:
        (cond-> (safe-bean this {})
          c (assoc :rows (rs/datafiable-result-set this c {}))))))
  ResultSetMetaData
  (datafy [this] (datafy-result-set-meta-data this))
  Statement
  ;; NOTE(review): :moreResults is omitted here, presumably because the
  ;; .getMoreResults getter has side effects -- confirm before relying on it
  (datafy [this] (safe-bean this {:omit #{:moreResults}})))
next-jdbc-1.3.955/src/next/jdbc/date_time.clj 0000664 0000000 0000000 00000011077 14700603111 0020647 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2021 Sean Corfield, all rights reserved
(ns next.jdbc.date-time
"Optional namespace that extends `next.jdbc.prepare/SettableParameter`
to various date/time types so that they will all be treated as SQL
timestamps (which also supports date and time column types) and has
functions to extend `next.jdbc.result-set/ReadableColumn`.
Simply requiring this namespace will extend the `SettableParameter` protocol
to the four types listed below.
In addition, there are several `read-as-*` functions here that will
extend `next.jdbc.result-set/ReadableColumn` to allow `java.sql.Date`
and `java.sql.Timestamp` columns to be read as (converted to) various
Java Time types automatically. The expectation is that you will call at
most one of these, at application startup, to enable the behavior you want.
Database support for Java Time:
* H2 and SQLite support conversion of Java Time (`Instant`, `LocalDate`,
`LocalDateTime`) out of the box,
* Nearly all databases support conversion of `java.util.Date` out of
the box -- except PostgreSQL apparently!
Types supported by this namespace:
* `java.time.Instant`
* `java.time.LocalDate`
* `java.time.LocalDateTime`
* `java.util.Date` -- mainly for PostgreSQL
PostgreSQL does not seem able to convert `java.util.Date` to a SQL
timestamp by default (every other database can!) so you'll probably
need to require this namespace, even if you don't use Java Time, when
working with PostgreSQL."
(:require [next.jdbc.prepare :as p]
[next.jdbc.result-set :as rs])
(:import (java.sql PreparedStatement Timestamp)))
(set! *warn-on-reflection* true)
;; all Java Time / java.util.Date conversions funnel through
;; java.sql.Timestamp via .setTimestamp:
(extend-protocol p/SettableParameter
  ;; Java Time type conversion:
  java.time.Instant
  (set-parameter [^java.time.Instant v ^PreparedStatement s ^long i]
    (.setTimestamp s i (Timestamp/from v)))
  java.time.LocalDate
  ;; a LocalDate has no time component; use the start of that day:
  (set-parameter [^java.time.LocalDate v ^PreparedStatement s ^long i]
    (.setTimestamp s i (Timestamp/valueOf (.atStartOfDay v))))
  java.time.LocalDateTime
  (set-parameter [^java.time.LocalDateTime v ^PreparedStatement s ^long i]
    (.setTimestamp s i (Timestamp/valueOf v)))
  ;; this is just to help PostgreSQL:
  java.util.Date
  (set-parameter [^java.util.Date v ^PreparedStatement s ^long i]
    (.setTimestamp s i (Timestamp/from (.toInstant v))))
  ;; but avoid unnecessary conversions for SQL Date and Timestamp:
  java.sql.Date
  (set-parameter [^java.sql.Date v ^PreparedStatement s ^long i]
    (.setDate s i v))
  java.sql.Timestamp
  (set-parameter [^java.sql.Timestamp v ^PreparedStatement s ^long i]
    (.setTimestamp s i v)))
(defn read-as-instant
  "After calling this function, `next.jdbc.result-set/ReadableColumn`
  will be extended to (`java.sql.Date` and) `java.sql.Timestamp` so that any
  timestamp columns will automatically be read as `java.time.Instant`.
  Note that `java.sql.Date` columns will still be returned as-is because they
  cannot be converted to an instant (they lack a time component)."
  []
  (extend-protocol rs/ReadableColumn
    java.sql.Date
    ;; pass through unchanged -- no time component to build an Instant from:
    (read-column-by-label [^java.sql.Date v _] v)
    (read-column-by-index [^java.sql.Date v _2 _3] v)
    java.sql.Timestamp
    (read-column-by-label [^java.sql.Timestamp v _] (.toInstant v))
    (read-column-by-index [^java.sql.Timestamp v _2 _3] (.toInstant v))))
(defn read-as-local
  "After calling this function, `next.jdbc.result-set/ReadableColumn`
  will be extended to `java.sql.Date` and `java.sql.Timestamp` so that any
  date or timestamp columns will automatically be read as `java.time.LocalDate`
  or `java.time.LocalDateTime` respectively."
  []
  (extend-protocol rs/ReadableColumn
    java.sql.Date
    (read-column-by-label [^java.sql.Date v _] (.toLocalDate v))
    (read-column-by-index [^java.sql.Date v _2 _3] (.toLocalDate v))
    java.sql.Timestamp
    (read-column-by-label [^java.sql.Timestamp v _] (.toLocalDateTime v))
    (read-column-by-index [^java.sql.Timestamp v _2 _3] (.toLocalDateTime v))))
(defn read-as-default
  "After calling this function, `next.jdbc.result-set/ReadableColumn`
  will be extended to `java.sql.Date` and `java.sql.Timestamp` so that any
  date or timestamp columns will be read as-is. This is provided for
  completeness, to undo the effects of `read-as-instant` or `read-as-local`."
  []
  (extend-protocol rs/ReadableColumn
    java.sql.Date
    (read-column-by-label [^java.sql.Date v _] v)
    (read-column-by-index [^java.sql.Date v _2 _3] v)
    java.sql.Timestamp
    (read-column-by-label [^java.sql.Timestamp v _] v)
    (read-column-by-index [^java.sql.Timestamp v _2 _3] v)))
next-jdbc-1.3.955/src/next/jdbc/default_options.clj 0000664 0000000 0000000 00000002754 14700603111 0022115 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2020-2021 Sean Corfield, all rights reserved
(ns ^:no-doc next.jdbc.default-options
"Implementation of default options logic."
(:require [next.jdbc.protocols :as p]))
(set! *warn-on-reflection* true)
;; pairs a connectable with a map of default options; each protocol
;; implementation below merges those defaults under any per-call options,
;; so per-call options win on conflict:
(defrecord DefaultOptions [connectable options])
(extend-protocol p/Sourceable
  DefaultOptions
  (get-datasource [this]
    ;; a bare datasource cannot carry options -- the defaults are dropped:
    (p/get-datasource (:connectable this))))
(extend-protocol p/Connectable
  DefaultOptions
  (get-connection [this opts]
    (p/get-connection (:connectable this)
                      (merge (:options this) opts))))
(extend-protocol p/Executable
  DefaultOptions
  (-execute [this sql-params opts]
    (p/-execute (:connectable this) sql-params
                (merge (:options this) opts)))
  (-execute-one [this sql-params opts]
    (p/-execute-one (:connectable this) sql-params
                    (merge (:options this) opts)))
  (-execute-all [this sql-params opts]
    (p/-execute-all (:connectable this) sql-params
                    (merge (:options this) opts))))
(extend-protocol p/Preparable
  DefaultOptions
  (prepare [this sql-params opts]
    (p/prepare (:connectable this) sql-params
               (merge (:options this) opts))))
(extend-protocol p/Transactable
  DefaultOptions
  (-transact [this body-fn opts]
    (p/-transact (:connectable this) body-fn
                 (merge (:options this) opts))))
next-jdbc-1.3.955/src/next/jdbc/defer.clj 0000664 0000000 0000000 00000006422 14700603111 0017777 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2024 Sean Corfield, all rights reserved
(ns next.jdbc.defer
"The idea behind the next.jdbc.defer namespace is to provide a
way to defer the execution of a series of SQL statements until
a later time, but still provide a way for inserted keys to be
used in later SQL statements.
The principle is to provide a core subset of the next.jdbc
and next.jdbc.sql API that produces a data structure that
describes a series of SQL operations to be performed, that
are held in a dynamic var, and that can be executed at a
later time, in a transaction."
(:require [next.jdbc :as jdbc]
[next.jdbc.sql.builder :refer [for-delete for-insert for-update]]))
(set! *warn-on-reflection* true)
;; dynamic var bound (by defer-ops) to an atom holding the vector of
;; deferred-operation descriptor maps; nil outside a deferral scope:
(def ^:private ^:dynamic *deferred* nil)
(defn execute-one!
  "Given a vector containing a SQL statement and parameters, defer
  execution of that statement.
  Supported options:
  * `:key-fn` -- extracts the generated key from the execution result
  (defaults to the first value in the result hash map),
  * `:key` -- the name under which to remember that generated key for
  use in later deferred statements."
  ([sql-p]
   (execute-one! sql-p {}))
  ([sql-p opts]
   (swap! *deferred* conj
          {:sql-p sql-p
           :key-fn (or (:key-fn opts) (comp first vals))
           :key (:key opts)
           :opts opts})))
(defn insert!
  "Given a table name, and a data hash map, defer an insertion of the
  data as a single row in the database.
  Supports the same `:key-fn` / `:key` options as `execute-one!` for
  capturing the generated key."
  ([table key-map]
   (insert! table key-map {}))
  ([table key-map opts]
   (swap! *deferred* conj
          {:sql-p (for-insert table key-map opts)
           :key-fn (or (:key-fn opts) (comp first vals))
           :key (:key opts)
           :opts opts})))
(defn update!
  "Given a table name, a hash map of columns and values to set, and
  either a hash map of columns and values to search on or a vector
  of a SQL where clause and parameters, defer an update on the table."
  ([table key-map where-params]
   (update! table key-map where-params {}))
  ([table key-map where-params opts]
   (swap! *deferred* conj
          {:sql-p (for-update table key-map where-params opts)
           :opts opts})))
(defn delete!
  "Given a table name, and either a hash map of columns and values
  to search on or a vector of a SQL where clause and parameters,
  defer a delete on the table."
  ([table where-params]
   (delete! table where-params {}))
  ([table where-params opts]
   (swap! *deferred* conj
          {:sql-p (for-delete table where-params opts)
           :opts opts})))
(defn deferrable
  "Wrap a transactable and an atom of deferred statement descriptors in an
  `IDeref`: dereferencing it runs all the statements inside a single
  transaction and returns the map of captured generated keys."
  [transactable stmts]
  (reify clojure.lang.IDeref
    (deref [_]
      ;; accumulates generated keys, keyed by each statement's :key option:
      (let [keys (atom {})]
        (jdbc/with-transaction [conn transactable]
          (doseq [{:keys [sql-p key-fn key opts]} @stmts]
            (let [sql-p
                  ;; keyword placeholders in the SQL-and-params vector are
                  ;; replaced by previously captured generated keys:
                  (mapv (fn [v]
                          (if (keyword? v)
                            (if (contains? @keys v)
                              (get @keys v)
                              (throw (ex-info (str "Deferred key not found " v)
                                              {:key v})))
                            v))
                        sql-p)
                  result (jdbc/execute-one! conn sql-p opts)]
              (when key
                ;; remember this statement's generated key for later statements:
                (swap! keys assoc key (key-fn result))))))
        @keys))))
(defn defer-ops
  "Call `f` with a fresh deferral queue in place; return the atom holding
  the accumulated deferred operations (not the result of `f`)."
  [f]
  (binding [*deferred* (atom [])]
    (f)
    ;; the atom outlives the binding scope; callers read it later:
    *deferred*))
(defmacro with-deferred
  "Evaluate `body` with SQL operations deferred, yielding a deferrable:
  deref the result to run those operations transactionally against
  `connectable`."
  [connectable & body]
  `(let [conn# ~connectable]
     (deferrable conn# (defer-ops (^{:once true} fn* [] ~@body)))))
next-jdbc-1.3.955/src/next/jdbc/optional.clj 0000664 0000000 0000000 00000012455 14700603111 0020542 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2024 Sean Corfield, all rights reserved
(ns next.jdbc.optional
"Builders that treat NULL SQL values as 'optional' and omit the
corresponding keys from the Clojure hash maps for the rows."
(:require [next.jdbc.result-set :as rs])
(:import (java.sql ResultSet)
(java.util Locale)))
(set! *warn-on-reflection* true)
;; row/result-set builder that omits SQL NULL columns from each row map;
;; rows accumulate in transients and are made persistent by row!/rs!:
(defrecord MapResultSetOptionalBuilder [^ResultSet rs rsmeta cols]
  rs/RowBuilder
  (->row [_this] (transient {}))
  (column-count [_this] (count cols))
  (with-column [this row i]
    ;; short-circuit on null to avoid column reading logic
    (let [v (.getObject rs ^Integer i)]
      (if (nil? v)
        row
        (rs/with-column-value this row (nth cols (dec i))
          (rs/read-column-by-index v rsmeta i)))))
  (with-column-value [_this row col v]
    ;; ensure that even if this is adapted, we omit null columns
    (if (nil? v)
      row
      (assoc! row col v)))
  (row! [_this row] (persistent! row))
  rs/ResultSetBuilder
  (->rs [_this] (transient []))
  (with-row [_this mrs row]
    (conj! mrs row))
  (rs! [_this mrs] (persistent! mrs)))
(defn as-maps
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces bare vectors of hash map rows, with nil columns omitted."
  [^ResultSet rs opts]
  (let [rsmeta (.getMetaData rs)]
    (->MapResultSetOptionalBuilder rs rsmeta (rs/get-column-names rsmeta opts))))
(defn as-unqualified-maps
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces bare vectors of hash map rows, with simple keys and nil
  columns omitted."
  [^ResultSet rs opts]
  (let [rsmeta (.getMetaData rs)]
    (->MapResultSetOptionalBuilder
     rs rsmeta (rs/get-unqualified-column-names rsmeta opts))))
(defn as-modified-maps
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces bare vectors of hash map rows, with modified keys and nil
  columns omitted.
  Requires both the `:qualifier-fn` and `:label-fn` options."
  [^ResultSet rs opts]
  (let [rsmeta (.getMetaData rs)]
    (->MapResultSetOptionalBuilder
     rs rsmeta (rs/get-modified-column-names rsmeta opts))))
(defn as-unqualified-modified-maps
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces bare vectors of hash map rows, with simple, modified keys
  and nil columns omitted.
  Requires the `:label-fn` option."
  [^ResultSet rs opts]
  (let [rsmeta (.getMetaData rs)]
    (->MapResultSetOptionalBuilder
     rs rsmeta (rs/get-unqualified-modified-column-names rsmeta opts))))
(defn- lower-case
  "Lower-case `s` using the US locale, avoiding locales where the lower-case
  form of a character is not a valid SQL entity name (e.g., Turkish)."
  [^String s]
  (.toLowerCase s java.util.Locale/US))
(defn as-lower-maps
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces bare vectors of hash map rows, with lower-case keys and nil
  columns omitted."
  [rs opts]
  ;; delegate to as-modified-maps with both name-modifiers set to lower-case:
  (let [opts' (assoc opts :qualifier-fn lower-case :label-fn lower-case)]
    (as-modified-maps rs opts')))
(defn as-unqualified-lower-maps
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces bare vectors of hash map rows, with simple, lower-case keys
  and nil columns omitted."
  [rs opts]
  ;; no :qualifier-fn needed -- keys are unqualified:
  (as-unqualified-modified-maps rs (assoc opts :label-fn lower-case)))
(defn as-maps-adapter
  "Given a map builder function (e.g., `as-lower-maps`) and a column reading
  function, return a new builder function that uses that column reading
  function instead of `.getObject` so you can override the default behavior.
  This adapter omits SQL NULL values, even if the underlying builder does not.
  The default column-reader behavior would be equivalent to:
  (defn default-column-reader
  [^ResultSet rs ^ResultSetMetaData rsmeta ^Integer i]
  (.getObject rs i))
  Your column-reader can use the result set metadata to determine whether
  to call `.getObject` or some other method to read the column's value.
  `read-column-by-index` is still called on the result of that read, if
  it is not `nil`."
  [builder-fn column-reader]
  (fn [rs opts]
    ;; mrsb: the wrapped builder; everything below delegates to it except
    ;; column reading, which goes through the supplied column-reader:
    (let [mrsb (builder-fn rs opts)]
      (reify
        rs/RowBuilder
        (->row [_this] (rs/->row mrsb))
        (column-count [_this] (rs/column-count mrsb))
        (with-column [_this row i]
          ;; short-circuit on null to avoid column reading logic
          (let [v (column-reader rs (:rsmeta mrsb) i)]
            (if (nil? v)
              row
              (rs/with-column-value mrsb row (nth (:cols mrsb) (dec i))
                (rs/read-column-by-index v (:rsmeta mrsb) i)))))
        (with-column-value [_this row col v]
          ;; ensure that even if this is adapted, we omit null columns
          (if (nil? v)
            row
            (rs/with-column-value mrsb row col v)))
        (row! [_this row] (rs/row! mrsb row))
        rs/ResultSetBuilder
        (->rs [_this] (rs/->rs mrsb))
        (with-row [_this mrs row] (rs/with-row mrsb mrs row))
        (rs! [_this mrs] (rs/rs! mrsb mrs))
        clojure.lang.ILookup
        ;; expose the wrapped builder's fields (e.g., :rsmeta, :cols) so
        ;; that nested adapters can still read them:
        (valAt [_this k] (get mrsb k))
        (valAt [_this k not-found] (get mrsb k not-found))))))
next-jdbc-1.3.955/src/next/jdbc/plan.clj 0000664 0000000 0000000 00000004762 14700603111 0017651 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2020-2021 Sean Corfield, all rights reserved
(ns next.jdbc.plan
"Some helper functions that make common operations with `next.jdbc/plan`
much easier."
(:require [next.jdbc :as jdbc]))
(set! *warn-on-reflection* true)
(defn select-one!
  "Execute the SQL and params using `next.jdbc/plan` and return just the
  selected columns from just the first row.
  `(plan/select-one! ds [:total] [\"select count(*) as total from table\"])`
  ;;=> {:total 42}
  If `cols` is a vector of columns to select, it is applied via
  `select-keys`; otherwise it is used directly as a function on the row.
  That means it can be a simple keyword to return just that column --
  which is the most common expected usage:
  `(plan/select-one! ds :total [\"select count(*) as total from table\"])`
  ;;=> 42
  The usual caveats apply about operations on a raw result set that
  can be done without realizing the whole row."
  ([connectable cols sql-params]
   (select-one! connectable cols sql-params {}))
  ([connectable cols sql-params opts]
   ;; decide once, up front, how each row should be projected:
   (let [extract (if (vector? cols)
                   #(select-keys % cols)
                   cols)]
     (reduce (fn [_ row] (reduced (extract row)))
             nil
             (jdbc/plan connectable sql-params opts)))))
(defn select!
  "Execute the SQL and params using `next.jdbc/plan` and (by default)
  return a vector of rows with just the selected columns.
  `(plan/select! ds [:id :name] [\"select * from table\"])`
  If `cols` is a vector of columns to select, each row is projected with
  `select-keys`; otherwise `cols` is mapped over the raw result set as a
  function. The latter allows the following usage, which returns a vector
  of all the values for a single column:
  `(plan/select! ds :id [\"select * from table\"])`
  The usual caveats apply about operations on a raw result set that
  can be done without realizing the whole row.
  The result is a vector by default, but can be changed using the
  `:into` option to provide the initial data structure into which
  the selected columns are poured, e.g., `:into #{}`"
  ([connectable cols sql-params]
   (select! connectable cols sql-params {}))
  ([connectable cols sql-params opts]
   ;; decide once, up front, how each row should be projected:
   (let [extract (if (vector? cols)
                   #(select-keys % cols)
                   cols)]
     (into (or (:into opts) [])
           (map extract)
           (jdbc/plan connectable sql-params opts)))))
next-jdbc-1.3.955/src/next/jdbc/prepare.clj 0000664 0000000 0000000 00000020636 14700603111 0020353 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2018-2021 Sean Corfield, all rights reserved
(ns next.jdbc.prepare
"Mostly an implementation namespace for how `PreparedStatement` objects are
created by the next generation java.jdbc library.
`set-parameters` is public and may be useful if you have a `PreparedStatement`
that you wish to reuse and (re)set the parameters on it.
Defines the `SettableParameter` protocol for converting Clojure values
to database-specific values.
See also https://cljdoc.org/d/com.github.seancorfield/next.jdbc/CURRENT/api/next.jdbc.date-time
for implementations of `SettableParameter` that provide automatic
conversion of Java Time objects to SQL data types.
See also https://cljdoc.org/d/com.github.seancorfield/next.jdbc/CURRENT/api/next.jdbc.types
for `as-xxx` functions that provide per-instance implementations of
`SettableParameter` for each of the standard `java.sql.Types` values."
(:require [clojure.java.data :as j]
[next.jdbc.protocols :as p])
(:import (java.sql Connection
PreparedStatement
ResultSet
Statement)))
(set! *warn-on-reflection* true)
(defprotocol SettableParameter
  "Protocol for setting SQL parameters in statement objects, which
  can convert from Clojure values. The default implementation just
  calls `.setObject` on the parameter value. It can be extended to
  use other methods of `PreparedStatement` to convert and set parameter
  values. Extension via metadata is supported."
  :extend-via-metadata true
  (set-parameter [val stmt ix]
    "Convert a Clojure value into a SQL value and store it as the ix'th
    parameter in the given SQL statement object."))
;; default implementations: delegate straight to .setObject, including an
;; explicit nil case (so nil values are set as SQL NULL):
(extend-protocol SettableParameter
  Object
  (set-parameter [v ^PreparedStatement s ^long i]
    (.setObject s i v))
  nil
  (set-parameter [_ ^PreparedStatement s ^long i]
    (.setObject s i nil)))
(defn set-parameters
  "Given a `PreparedStatement` and a vector of parameter values, update the
  `PreparedStatement` with those parameters and return it.

  JDBC parameter indices are 1-based, so the first value is stored at
  index 1, the second at index 2, and so on."
  ^java.sql.PreparedStatement
  [^PreparedStatement ps params]
  (doseq [[ix value] (map-indexed vector params)]
    (set-parameter value ps (inc ix)))
  ps)
;; Private lookup tables translating the user-friendly keyword values
;; accepted in the options hash map to the corresponding `java.sql.ResultSet`
;; integer constants. Unknown values fall through unchanged via the `get`
;; calls in `create` and `statement` below.
(def ^{:private true
       :doc "Map friendly `:concurrency` values to `ResultSet` constants."}
  result-set-concurrency
  {:read-only ResultSet/CONCUR_READ_ONLY
   :updatable ResultSet/CONCUR_UPDATABLE})
(def ^{:private true
       :doc "Map friendly `:cursors` values to `ResultSet` constants."}
  result-set-holdability
  {:hold ResultSet/HOLD_CURSORS_OVER_COMMIT
   :close ResultSet/CLOSE_CURSORS_AT_COMMIT})
(def ^{:private true
       :doc "Map friendly `:type` values to `ResultSet` constants."}
  result-set-type
  {:forward-only ResultSet/TYPE_FORWARD_ONLY
   :scroll-insensitive ResultSet/TYPE_SCROLL_INSENSITIVE
   :scroll-sensitive ResultSet/TYPE_SCROLL_SENSITIVE})
;; Convert a sequence of column names (keywords or strings) to a String[]
;; suitable for `Connection.prepareStatement(String, String[])`. The :tag
;; metadata is computed because there is no literal class syntax for String[].
(defn- ^{:tag (class (into-array String []))} string-array
  [return-keys]
  (into-array String (map name return-keys)))
(defn create
  "This is an implementation detail -- use `next.jdbc/prepare` instead.
  Given a `Connection`, a SQL string, some parameters, and some options,
  return a `PreparedStatement` representing that."
  ^java.sql.PreparedStatement
  [^Connection con ^String sql params
   {:keys [return-keys result-type concurrency cursors
           fetch-size max-rows timeout]
    :as opts}]
  (let [^PreparedStatement ps
        (cond
          return-keys
          (do
            ;; :return-keys is mutually exclusive with the result-set options:
            (when (or result-type concurrency cursors)
              (throw (IllegalArgumentException.
                      (str ":concurrency, :cursors, and :result-type "
                           "may not be specified with :return-keys."))))
            (if (vector? return-keys)
              ;; a vector names specific key columns; degrade gracefully if
              ;; the driver does not support named keys, then generated keys:
              (let [key-names (string-array return-keys)]
                (try
                  (try
                    (.prepareStatement con sql key-names)
                    (catch Exception _
                      ;; assume it is unsupported and try regular generated keys:
                      (.prepareStatement con sql Statement/RETURN_GENERATED_KEYS)))
                  (catch Exception _
                    ;; assume it is unsupported and try basic PreparedStatement:
                    (.prepareStatement con sql))))
              (try
                (.prepareStatement con sql Statement/RETURN_GENERATED_KEYS)
                (catch Exception _
                  ;; assume it is unsupported and try basic PreparedStatement:
                  (.prepareStatement con sql)))))
          (and result-type concurrency)
          ;; unknown keyword values pass through unchanged, which allows raw
          ;; ResultSet integer constants to be supplied directly:
          (if cursors
            (.prepareStatement con sql
                               (get result-set-type result-type result-type)
                               (get result-set-concurrency concurrency concurrency)
                               (get result-set-holdability cursors cursors))
            (.prepareStatement con sql
                               (get result-set-type result-type result-type)
                               (get result-set-concurrency concurrency concurrency)))
          (or result-type concurrency cursors)
          (throw (IllegalArgumentException.
                  (str ":concurrency, :cursors, and :result-type "
                       "may not be specified independently.")))
          :else
          (.prepareStatement con sql))]
    ;; fast, specific option handling:
    (when fetch-size
      (.setFetchSize ps fetch-size))
    (when max-rows
      (.setMaxRows ps max-rows))
    (when timeout
      (.setQueryTimeout ps timeout))
    ;; slow, general-purpose option handling:
    (when-let [props (:statement opts)]
      (j/set-properties ps props))
    (set-parameters ps params)))
;; A sql-params vector is [sql & params]: the first element is the SQL
;; string, the rest are the positional parameter values.
(extend-protocol p/Preparable
  java.sql.Connection
  (prepare [this sql-params opts]
    (create this (first sql-params) (rest sql-params) opts)))
(defn statement
  "Given a `Connection` and some options, return a `Statement`.

  Honors the same `:result-type`/`:concurrency`/`:cursors` options as
  `create` above (but not `:return-keys`, which only applies to prepared
  statements), plus `:fetch-size`, `:max-rows`, `:timeout`, and a general
  `:statement` properties map."
  (^java.sql.Statement
   [con] (statement con {}))
  (^java.sql.Statement
   [^Connection con
    {:keys [result-type concurrency cursors
            fetch-size max-rows timeout]
     :as opts}]
   (let [^Statement stmt
         (cond
           (and result-type concurrency)
           ;; unknown keyword values pass through unchanged, which allows
           ;; raw ResultSet integer constants to be supplied directly:
           (if cursors
             (.createStatement con
                               (get result-set-type result-type result-type)
                               (get result-set-concurrency concurrency concurrency)
                               (get result-set-holdability cursors cursors))
             (.createStatement con
                               (get result-set-type result-type result-type)
                               (get result-set-concurrency concurrency concurrency)))
           (or result-type concurrency cursors)
           (throw (IllegalArgumentException.
                   (str ":concurrency, :cursors, and :result-type "
                        "may not be specified independently.")))
           :else
           (.createStatement con))]
     ;; fast, specific option handling:
     (when fetch-size
       (.setFetchSize stmt fetch-size))
     (when max-rows
       (.setMaxRows stmt max-rows))
     (when timeout
       (.setQueryTimeout stmt timeout))
     ;; slow, general-purpose option handling:
     (when-let [props (:statement opts)]
       (j/set-properties stmt props))
     stmt)))
;; Volatile holder for `datafiable-result-set`, installed by
;; next.jdbc.result-set when that namespace loads (see the `vreset!` there);
;; this avoids a circular dependency between the two namespaces.
(def ^:private d-r-s (volatile! nil))
(defn ^:no-doc execute-batch!
  "As of 1.1.643, `next.jdbc.prepare/execute-batch!`
  (this function) is deprecated.
  Use `next.jdbc/execute-batch!` instead."
  ([ps param-groups]
   (execute-batch! ps param-groups {}))
  ([^PreparedStatement ps param-groups opts]
   (let [gen-ks (when (:return-generated-keys opts)
                  ;; d-r-s holds datafiable-result-set (set when
                  ;; next.jdbc.result-set loads); nil means that namespace
                  ;; has not been required, so generated keys are unavailable:
                  (when-let [drs @d-r-s]
                    #(drs (.getGeneratedKeys ^PreparedStatement %)
                          (p/get-connection ps {})
                          opts)))
         ;; optionally split the parameter groups into fixed-size batches:
         params (if-let [n (:batch-size opts)]
                  (if (and (number? n) (pos? n))
                    (partition-all n param-groups)
                    (throw (IllegalArgumentException.
                            ":batch-size must be positive")))
                  [param-groups])]
     (into []
           (mapcat (fn [group]
                     ;; queue every group's parameters, then run the batch:
                     (run! #(.addBatch (set-parameters ps %)) group)
                     (let [result (if (:large opts)
                                    (.executeLargeBatch ps)
                                    (.executeBatch ps))]
                       (if gen-ks (gen-ks ps) result))))
           params))))
next-jdbc-1.3.955/src/next/jdbc/protocols.clj 0000664 0000000 0000000 00000005417 14700603111 0020741 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2018-2021 Sean Corfield, all rights reserved
(ns next.jdbc.protocols
"This is the extensible core of the next generation java.jdbc library.
* `Sourceable` -- for producing `javax.sql.DataSource` objects,
* `Connectable` -- for producing new `java.sql.Connection` objects,
* `Executable` -- for executing SQL operations,
* `Preparable` -- for producing new `java.sql.PreparedStatement` objects,
* `Transactable` -- for executing SQL operations transactionally.")
(set! *warn-on-reflection* true)
(defprotocol Sourceable
  "Protocol for producing a `javax.sql.DataSource`.
  Implementations are provided for strings, hash maps (`db-spec` structures),
  and also a `DataSource` (which just returns itself).
  Extension via metadata is supported."
  :extend-via-metadata true
  (get-datasource ^javax.sql.DataSource [this]
    "Produce a `javax.sql.DataSource`."))
(defprotocol Connectable
  "Protocol for producing a new JDBC connection that should be closed when you
  are finished with it.
  Implementations are provided for `DataSource`, `PreparedStatement`, and
  `Object`, on the assumption that an `Object` can be turned into a `DataSource`."
  (get-connection ^java.sql.Connection [this opts]
    "Produce a new `java.sql.Connection` for use with `with-open`."))
;; Note: the `-execute*` and `-transact` names carry a leading hyphen to
;; distinguish them from the public wrapper functions of similar names.
(defprotocol Executable
  "Protocol for executing SQL operations.
  Implementations are provided for `Connection`, `DataSource`,
  `PreparedStatement`, and `Object`, on the assumption that an `Object` can be
  turned into a `DataSource` and therefore used to get a `Connection`."
  (-execute ^clojure.lang.IReduceInit [this sql-params opts]
    "Produce a 'reducible' that, when reduced (with an initial value), executes
    the SQL and processes the rows of the `ResultSet` directly.")
  (-execute-one [this sql-params opts]
    "Executes the SQL or DDL and produces the first row of the `ResultSet`
    as a fully-realized, datafiable hash map (by default).")
  (-execute-all [this sql-params opts]
    "Executes the SQL and produces (by default) a vector of
    fully-realized, datafiable hash maps from the `ResultSet`."))
(defprotocol Preparable
  "Protocol for producing a new `java.sql.PreparedStatement` that should
  be closed after use. Can be used by `Executable` functions.
  Implementation is provided for `Connection` only."
  (prepare ^java.sql.PreparedStatement [this sql-params opts]
    "Produce a new `java.sql.PreparedStatement` for use with `with-open`."))
(defprotocol Transactable
  "Protocol for running SQL operations in a transaction.
  Implementations are provided for `Connection`, `DataSource`, and `Object`
  (on the assumption that an `Object` can be turned into a `DataSource`)."
  :extend-via-metadata true
  (-transact [this body-fn opts]
    "Run the `body-fn` inside a transaction."))
next-jdbc-1.3.955/src/next/jdbc/quoted.clj 0000664 0000000 0000000 00000002046 14700603111 0020211 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2023 Sean Corfield, all rights reserved
(ns next.jdbc.quoted
"Provides functions for use with the `:table-fn` and `:column-fn` options
that define how SQL entities should be quoted in strings constructed
from Clojure data."
(:require [clojure.string :as str]))
(set! *warn-on-reflection* true)
(defn strop
  "Wrap `x` in the opening strop `s` and closing strop `e`, doubling any
  occurrence of the closing strop character inside `x` to escape it."
  [s x e]
  (let [closer  (str e)
        escaped (str/replace x closer (str e e))]
    (str s escaped e)))
;; Concrete quoting functions for common databases. Oracle and PostgreSQL
;; follow the ANSI double-quote convention, so they are aliases for `ansi`.
(defn ansi "ANSI \"quoting\"" [s] (strop \" s \"))
(defn mysql "MySQL `quoting`" [s] (strop \` s \`))
(defn sql-server "SQL Server [quoting]" [s] (strop \[ s \]))
(def oracle "Oracle \"quoting\" (ANSI)" ansi)
(def postgres "PostgreSQL \"quoting\" (ANSI)" ansi)
(defn schema
  "Given a quoting function, return a new quoting function that will
  process schema-qualified names by quoting each segment:
  ```clojure
  (mysql (name :foo.bar)) ;=> `foo.bar`
  ((schema mysql) (name :foo.bar)) ;=> `foo`.`bar`
  ```
  "
  [quoting]
  (fn [s]
    ;; split on '.', quote each segment independently, then rejoin:
    (let [segments (str/split s #"\.")]
      (str/join "." (map quoting segments)))))
next-jdbc-1.3.955/src/next/jdbc/result_set.clj 0000664 0000000 0000000 00000134405 14700603111 0021106 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2018-2024 Sean Corfield, all rights reserved
(ns next.jdbc.result-set
"An implementation of `ResultSet` handling functions.
Defines the following protocols:
* `DatafiableRow` -- for turning a row into something datafiable
* `ReadableColumn` -- to read column values by label or index
* `RowBuilder` -- for materializing a row
* `ResultSetBuilder` -- for materializing a result set
A broad range of result set builder implementation functions are provided.
Also provides the default implementations for `Executable` and
the default `datafy`/`nav` behavior for rows from a result set.
See also https://cljdoc.org/d/com.github.seancorfield/next.jdbc/CURRENT/api/next.jdbc.date-time
for implementations of `ReadableColumn` that provide automatic
conversion of some SQL data types to Java Time objects."
(:require [camel-snake-kebab.core :refer [->kebab-case]]
[clojure.core.protocols :as core-p]
[clojure.core.reducers :as r]
[clojure.datafy :as d]
[next.jdbc.prepare :as prepare]
[next.jdbc.protocols :as p])
(:import (java.sql Clob
PreparedStatement
ResultSet ResultSetMetaData
Statement
SQLException)
(java.util Locale)
(java.util.concurrent ForkJoinPool ForkJoinTask)))
(set! *warn-on-reflection* true)
;; Look up the table name for column `i`; per the JDBC docs, drivers that
;; cannot supply a table name should return "" -- but some throw
;; SQLFeatureNotSupportedException instead, so normalize that to "" here.
(defn- get-table-name
  "No, if it isn't supported, you're supposed to return
  an empty string. That's what the JDBC docs say!"
  [^ResultSetMetaData rsmeta ^Integer i]
  (try
    (.getTableName rsmeta i)
    (catch java.sql.SQLFeatureNotSupportedException _
      "")))
;; The functions below all map 1-based column indices over the metadata;
;; when `rsmeta` is nil they produce an empty vector (count defaults to 0).
(defn get-column-names
  "Given `ResultSetMetaData`, return a vector of column names, each qualified by
  the table from which it came."
  [^ResultSetMetaData rsmeta _]
  (mapv (fn [^Integer i]
          ;; qualify only when the driver supplied a non-empty table name:
          (if-let [q (not-empty (get-table-name rsmeta i))]
            (keyword q (.getColumnLabel rsmeta i))
            (keyword (.getColumnLabel rsmeta i))))
        (range 1 (inc (if rsmeta (.getColumnCount rsmeta) 0)))))
(defn get-unqualified-column-names
  "Given `ResultSetMetaData`, return a vector of unqualified column names."
  [^ResultSetMetaData rsmeta _]
  (mapv (fn [^Integer i] (keyword (.getColumnLabel rsmeta i)))
        (range 1 (inc (if rsmeta (.getColumnCount rsmeta) 0)))))
(defn get-modified-column-names
  "Given `ResultSetMetaData`, return a vector of modified column names, each
  qualified by the table from which it came.
  Requires both the `:qualifier-fn` and `:label-fn` options."
  [^ResultSetMetaData rsmeta opts]
  (let [qf (:qualifier-fn opts)
        lf (:label-fn opts)]
    (assert qf ":qualifier-fn is required")
    (assert lf ":label-fn is required")
    (mapv (fn [^Integer i]
            ;; the qualifier-fn may itself return nil/"" to drop the qualifier:
            (if-let [q (some-> (get-table-name rsmeta i) (qf) (not-empty))]
              (keyword q (-> (.getColumnLabel rsmeta i) (lf)))
              (keyword (-> (.getColumnLabel rsmeta i) (lf)))))
          (range 1 (inc (if rsmeta (.getColumnCount rsmeta) 0))))))
(defn get-unqualified-modified-column-names
  "Given `ResultSetMetaData`, return a vector of unqualified modified column
  names.
  Requires the `:label-fn` option."
  [^ResultSetMetaData rsmeta opts]
  (let [lf (:label-fn opts)]
    (assert lf ":label-fn is required")
    (mapv (fn [^Integer i] (keyword (lf (.getColumnLabel rsmeta i))))
          (range 1 (inc (if rsmeta (.getColumnCount rsmeta) 0))))))
(defn- lower-case
  "Converts a string to lower case in the US locale to avoid problems in
  locales where the lower case version of a character is not a valid SQL
  entity name (e.g., Turkish)."
  [^String s]
  (.toLowerCase s Locale/US))
(defn get-lower-column-names
  "Given `ResultSetMetaData`, return a vector of lower-case column names, each
  qualified by the table from which it came."
  [rsmeta opts]
  (get-modified-column-names rsmeta (assoc opts
                                           :qualifier-fn lower-case
                                           :label-fn lower-case)))
(defn get-unqualified-lower-column-names
  "Given `ResultSetMetaData`, return a vector of unqualified, lower-case
  column names."
  [rsmeta opts]
  (get-unqualified-modified-column-names rsmeta
                                         (assoc opts :label-fn lower-case)))
(defprotocol ReadableColumn
  "Protocol for reading objects from the `java.sql.ResultSet`. Default
  implementations (for `Object` and `nil`) return the argument, and the
  `Boolean` implementation ensures a canonicalized `true`/`false` value,
  but it can be extended to provide custom behavior for special types.
  Extension via metadata is supported."
  :extend-via-metadata true
  (read-column-by-label [val label]
    "Function for transforming values after reading them via a column label.")
  (read-column-by-index [val rsmeta idx]
    "Function for transforming values after reading them via a column index."))
(extend-protocol ReadableColumn
  Object
  (read-column-by-label [x _] x)
  (read-column-by-index [x _2 _3] x)
  Boolean
  ;; (= true x) normalizes any Boolean instance to the canonical true/false:
  (read-column-by-label [x _] (if (= true x) true false))
  (read-column-by-index [x _2 _3] (if (= true x) true false))
  nil
  (read-column-by-label [_1 _2] nil)
  (read-column-by-index [_1 _2 _3] nil))
(defprotocol RowBuilder
  "Protocol for building rows in various representations.
  The default implementation for building hash maps: `MapResultSetBuilder`"
  (->row [_]
    "Called once per row to create the basis of each row.")
  (column-count [_]
    "Return the number of columns in each row.")
  (with-column [_ row i]
    "Called with the row and the index of the column to be added;
    this is expected to read the column value from the `ResultSet`!")
  (with-column-value [_ row col v]
    "Called with the row, the column name, and the value to be added;
    this is a low-level function, typically used by `with-column`.")
  (row! [_ row]
    "Called once per row to finalize each row once it is complete."))
(defprotocol ResultSetBuilder
  "Protocol for building result sets in various representations.
  Default implementations for building vectors of hash maps and vectors of
  column names and row values: `MapResultSetBuilder` & `ArrayResultSetBuilder`"
  (->rs [_]
    "Called to create the basis of the result set.")
  (with-row [_ rs row]
    "Called with the result set and the row to be added.")
  (rs! [_ rs]
    "Called to finalize the result set once it is complete."))
(defn builder-adapter
  "Given any builder function (e.g., `as-lower-maps`) and a column reading
  function, return a new builder function that uses that column reading
  function instead of `.getObject` and `read-column-by-index` so you can
  override the default behavior.
  The default column-by-index-fn behavior would be equivalent to:
  (defn default-column-by-index-fn
  [builder ^ResultSet rs ^Integer i]
  (read-column-by-index (.getObject rs i) (:rsmeta builder) i))
  Your column-by-index-fn can use the result set metadata `(:rsmeta builder)`
  and/or the (processed) column name `(nth (:cols builder) (dec i))` to
  determine whether to call `.getObject` or some other method to read the
  column's value, and can choose whether or not to use the `ReadableColumn`
  protocol-based value processor (and could add metadata to the value to
  satisfy that protocol on a per-instance basis)."
  [builder-fn column-by-index-fn]
  (fn [rs opts]
    ;; wrap the underlying builder, delegating everything except the
    ;; column read, which goes through the supplied column-by-index-fn:
    (let [builder (builder-fn rs opts)]
      (reify
        RowBuilder
        (->row [_this] (->row builder))
        (column-count [_this] (column-count builder))
        (with-column [this row i]
          (with-column-value this row (nth (:cols builder) (dec i))
            (column-by-index-fn builder rs i)))
        (with-column-value [_this row col v]
          (with-column-value builder row col v))
        (row! [_this row] (row! builder row))
        ResultSetBuilder
        (->rs [_this] (->rs builder))
        (with-row [_this mrs row] (with-row builder mrs row))
        (rs! [_this mrs] (rs! builder mrs))
        ;; preserve keyword access to the wrapped builder's fields
        ;; (e.g., :rsmeta and :cols used by column-by-index-fn):
        clojure.lang.ILookup
        (valAt [_this k] (get builder k))
        (valAt [_this k not-found] (get builder k not-found))))))
;; Default builder: rows are (transient) hash maps keyed by the entries of
;; `cols`; the result set is a (transient) vector of those maps.
(defrecord MapResultSetBuilder [^ResultSet rs rsmeta cols]
  RowBuilder
  (->row [_this] (transient {}))
  (column-count [_this] (count cols))
  (with-column [this row i]
    (with-column-value this row (nth cols (dec i))
      (read-column-by-index (.getObject rs ^Integer i) rsmeta i)))
  (with-column-value [_this row col v]
    (assoc! row col v))
  (row! [_this row] (persistent! row))
  ResultSetBuilder
  (->rs [_this] (transient []))
  (with-row [_this mrs row]
    (conj! mrs row))
  (rs! [_this mrs] (persistent! mrs)))
;; The `as-*-maps` family below differ only in which column-name helper
;; they use (qualified, unqualified, modified, lower-case, kebab-case).
(defn as-maps
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces bare vectors of hash map rows.
  This is the default `:builder-fn` option."
  [^ResultSet rs opts]
  (let [rsmeta (.getMetaData rs)
        cols (get-column-names rsmeta opts)]
    (->MapResultSetBuilder rs rsmeta cols)))
(defn as-unqualified-maps
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces bare vectors of hash map rows, with simple keys."
  [^ResultSet rs opts]
  (let [rsmeta (.getMetaData rs)
        cols (get-unqualified-column-names rsmeta opts)]
    (->MapResultSetBuilder rs rsmeta cols)))
(defn as-modified-maps
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces bare vectors of hash map rows, with modified keys.
  Requires both the `:qualifier-fn` and `:label-fn` options."
  [^ResultSet rs opts]
  (let [rsmeta (.getMetaData rs)
        cols (get-modified-column-names rsmeta opts)]
    (->MapResultSetBuilder rs rsmeta cols)))
(defn as-unqualified-modified-maps
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces bare vectors of hash map rows, with simple, modified keys.
  Requires the `:label-fn` option."
  [^ResultSet rs opts]
  (let [rsmeta (.getMetaData rs)
        cols (get-unqualified-modified-column-names rsmeta opts)]
    (->MapResultSetBuilder rs rsmeta cols)))
(defn as-lower-maps
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces bare vectors of hash map rows, with lower-case keys."
  [rs opts]
  (as-modified-maps rs (assoc opts
                              :qualifier-fn lower-case
                              :label-fn lower-case)))
(defn as-unqualified-lower-maps
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces bare vectors of hash map rows, with simple, lower-case keys."
  [rs opts]
  (as-unqualified-modified-maps rs (assoc opts :label-fn lower-case)))
(defn as-kebab-maps
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces bare vectors of hash map rows, with kebab-case keys."
  [rs opts]
  (as-modified-maps rs (assoc opts
                              :qualifier-fn ->kebab-case
                              :label-fn ->kebab-case)))
(defn as-unqualified-kebab-maps
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces bare vectors of hash map rows, with simple, kebab-case keys."
  [rs opts]
  (as-unqualified-modified-maps rs (assoc opts :label-fn ->kebab-case)))
(defn as-maps-adapter
  "Given a map builder function (e.g., `as-lower-maps`) and a column reading
  function, return a new builder function that uses that column reading
  function instead of `.getObject` so you can override the default behavior.
  The default column-reader behavior would be equivalent to:
  (defn default-column-reader
  [^ResultSet rs ^ResultSetMetaData rsmeta ^Integer i]
  (.getObject rs i))
  Your column-reader can use the result set metadata to determine whether
  to call `.getObject` or some other method to read the column's value.
  `read-column-by-index` is still called on the result of that read.
  Note: this is different behavior to `builder-adapter`'s `column-by-index-fn`."
  [builder-fn column-reader]
  ;; delegate to builder-adapter, but keep applying read-column-by-index
  ;; to whatever the user-supplied column-reader produces:
  (builder-adapter builder-fn
                   (fn [builder rs i]
                     (let [^ResultSetMetaData rsmeta (:rsmeta builder)]
                       (read-column-by-index (column-reader rs rsmeta i)
                                             rsmeta
                                             i)))))
(defn clob->string
  "Given a CLOB column value, read its entire character stream and
  return it as a string. Closes the stream when done."
  [^Clob clob]
  (let [rdr (.getCharacterStream clob)]
    (try
      (slurp rdr)
      (finally
        (.close rdr)))))
(defn clob-column-reader
  "An example column-reader that still uses `.getObject` but expands CLOB
  columns into strings. Returns nil for NULL columns."
  [^ResultSet rs ^ResultSetMetaData _ ^Integer i]
  (when-let [value (.getObject rs i)]
    (if (instance? Clob value)
      (clob->string value)
      value)))
;; Array builder: rows are (transient) vectors of values in column order;
;; the result set starts as a vector whose first element is `cols`.
(defrecord ArrayResultSetBuilder [^ResultSet rs rsmeta cols]
  RowBuilder
  (->row [_this] (transient []))
  (column-count [_this] (count cols))
  (with-column [this row i]
    ;; col name is nil: array rows are positional, not keyed
    (with-column-value this row nil
      (read-column-by-index (.getObject rs ^Integer i) rsmeta i)))
  (with-column-value [_this row _ v]
    (conj! row v))
  (row! [_this row] (persistent! row))
  ResultSetBuilder
  (->rs [_this] (transient [cols]))
  (with-row [_this ars row]
    (conj! ars row))
  (rs! [_this ars] (persistent! ars)))
;; The `as-*-arrays` family mirrors the `as-*-maps` family above, but
;; builds `ArrayResultSetBuilder` instances instead.
(defn as-arrays
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces a vector of column names followed by vectors of row values."
  [^ResultSet rs opts]
  (let [rsmeta (.getMetaData rs)
        cols (get-column-names rsmeta opts)]
    (->ArrayResultSetBuilder rs rsmeta cols)))
(defn as-unqualified-arrays
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces a vector of simple column names followed by vectors of row
  values."
  [^ResultSet rs opts]
  (let [rsmeta (.getMetaData rs)
        cols (get-unqualified-column-names rsmeta opts)]
    (->ArrayResultSetBuilder rs rsmeta cols)))
(defn as-modified-arrays
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces a vector of modified column names followed by vectors of
  row values.
  Requires both the `:qualifier-fn` and `:label-fn` options."
  [^ResultSet rs opts]
  (let [rsmeta (.getMetaData rs)
        cols (get-modified-column-names rsmeta opts)]
    (->ArrayResultSetBuilder rs rsmeta cols)))
(defn as-unqualified-modified-arrays
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces a vector of simple, modified column names followed by
  vectors of row values.
  Requires the `:label-fn` option."
  [^ResultSet rs opts]
  (let [rsmeta (.getMetaData rs)
        cols (get-unqualified-modified-column-names rsmeta opts)]
    (->ArrayResultSetBuilder rs rsmeta cols)))
(defn as-lower-arrays
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces a vector of lower-case column names followed by vectors of
  row values."
  [rs opts]
  (as-modified-arrays rs (assoc opts
                                :qualifier-fn lower-case
                                :label-fn lower-case)))
(defn as-unqualified-lower-arrays
  "Given a `ResultSet` and options, return a `RowBuilder` / `ResultSetBuilder`
  that produces a vector of simple, lower-case column names followed by
  vectors of row values."
  [rs opts]
  (as-unqualified-modified-arrays rs (assoc opts :label-fn lower-case)))
(defn as-arrays-adapter
  "Given an array builder function (e.g., `as-unqualified-arrays`) and a column
  reading function, return a new builder function that uses that column reading
  function instead of `.getObject` so you can override the default behavior.
  The default column-reader behavior would be equivalent to:
  (defn default-column-reader
  [^ResultSet rs ^ResultSetMetaData rsmeta ^Integer i]
  (.getObject rs i))
  Your column-reader can use the result set metadata to determine whether
  to call `.getObject` or some other method to read the column's value.
  `read-column-by-index` is still called on the result of that read.
  Note: this is different behavior to `builder-adapter`'s `column-by-index-fn`."
  [builder-fn column-reader]
  ;; same wrapping strategy as as-maps-adapter above:
  (builder-adapter builder-fn
                   (fn [builder rs i]
                     (let [^ResultSetMetaData rsmeta (:rsmeta builder)]
                       (read-column-by-index (column-reader rs rsmeta i)
                                             rsmeta
                                             i)))))
;; forward declarations: navize-row / navable-row are defined later in
;; this namespace but referenced by datafiable-row implementations below
(declare navize-row)
(declare navable-row)
(defprotocol DatafiableRow
  "Protocol for making rows datafiable and therefore navigable.
  The default implementation just adds metadata so that `datafy` can be
  called on the row, which will produce something that `nav` can be called
  on, to lazily navigate through foreign key relationships into other tables.
  If `datafiable-row` is called when reducing the result set produced by
  `next.jdbc/plan`, the row is fully-realized from the `ResultSet`
  first, using the `:builder-fn` (or `as-maps` by default)."
  (datafiable-row [this connectable opts]
    "Produce a datafiable representation of a row from a `ResultSet`."))
(defn- row-builder
  "Given a `RowBuilder` -- a row materialization strategy -- produce a fully
  materialized row from it.

  Walks every (1-based) column index, asking the builder to read and add
  each column's value to the row under construction, then finalizes it."
  [builder]
  (let [n (column-count builder)]
    (loop [row (->row builder)
           i   1]
      (if (<= i n)
        (recur (with-column builder row i) (inc i))
        (row! builder row)))))
;; Marker interface with no methods: exists only so the print-method /
;; print-dup implementations below can target the mapified row wrapper.
(definterface MapifiedResultSet)
(defprotocol InspectableMapifiedResultSet
  "Protocol for exposing aspects of the (current) result set via functions.
  The intent here is to expose information that is associated with either
  the (current row of the) result set or the result set metadata, via
  functions that can be called inside a reducing function being used over
  `next.jdbc/plan`, including situations where the reducing function has
  to realize a row by calling `datafiable-row` but still wants to call
  these functions on the (realized) row."
  :extend-via-metadata true
  (row-number [this]
    "Return the current 1-based row number, if available.
    Should not cause any row realization.")
  (column-names [this]
    "Return a vector of the column names from the result set.
    Reifies the result builder, in order to construct column names,
    but should not cause any row realization.")
  (metadata [this]
    "Return the raw `ResultSetMetaData` object from the result set.
    Should not cause any row realization.
    If `next.jdbc.datafy` has been required, this metadata will be
    fully-realized as a Clojure data structure, otherwise this should
    not be allowed to 'leak' outside of the reducing function as it may
    depend on the connection remaining open, in order to be valid."))
(defn- mapify-result-set
  "Given a `ResultSet`, return an object that wraps the current row as a hash
  map. Note that a result set is mutable and the current row will change behind
  this wrapper so operations need to be eager (and fairly limited).
  Supports `IPersistentMap` in full. Any operation that requires a full hash
  map (`assoc`, `dissoc`, `cons`, `seq`, etc) will cause a full row to be
  realized (via `row-builder` above). The result will be a regular map: if
  you want the row to be datafiable/navigable, use `datafiable-row` to
  realize the full row explicitly before performing other
  (metadata-preserving) operations on it."
  [^ResultSet rs opts]
  ;; the builder is delayed so that simple key lookups (which go straight
  ;; to the ResultSet by label) never pay for builder construction:
  (let [builder (delay ((get opts :builder-fn as-maps) rs opts))
        name-fn (:name-fn opts name)
        name-fn (if (contains? opts :column-fn)
                  (comp (get opts :column-fn) name-fn)
                  name-fn)]
    (reify
      MapifiedResultSet
      ;; marker, just for printing resolution
      InspectableMapifiedResultSet
      (row-number [_this] (.getRow rs))
      (column-names [_this] (:cols @builder))
      (metadata [_this] (d/datafy (.getMetaData rs)))
      clojure.lang.IPersistentMap
      ;; map-shaped operations realize the full row first:
      (assoc [_this k v]
        (assoc (row-builder @builder) k v))
      (assocEx [_this k v]
        (.assocEx ^clojure.lang.IPersistentMap (row-builder @builder) k v))
      (without [_this k]
        (dissoc (row-builder @builder) k))
      java.lang.Iterable ; Java 7 compatible: no forEach / spliterator
      (iterator [_this]
        (.iterator ^java.lang.Iterable (row-builder @builder)))
      clojure.lang.Associative
      ;; key membership is probed directly against the ResultSet: a
      ;; SQLException from .getObject means "no such column":
      (containsKey [_this k]
        (try
          (.getObject rs ^String (name-fn k))
          true
          (catch SQLException _
            false)))
      (entryAt [_this k]
        (try
          (clojure.lang.MapEntry. k (read-column-by-label
                                      (.getObject rs ^String (name-fn k))
                                      ^String (name-fn k)))
          (catch SQLException _)))
      clojure.lang.Counted
      (count [_this]
        (column-count @builder))
      clojure.lang.IPersistentCollection
      (cons [_this obj]
        (let [row (row-builder @builder)]
          (conj row obj)))
      (empty [_this]
        {})
      (equiv [_this obj]
        (.equiv ^clojure.lang.IPersistentCollection (row-builder @builder) obj))
      ;; we support get with a numeric key for array-based builders:
      clojure.lang.ILookup
      (valAt [_this k]
        (try
          (if (number? k)
            (let [^Integer i (inc k)]
              (read-column-by-index (.getObject rs i) (:rsmeta @builder) i))
            (read-column-by-label (.getObject rs ^String (name-fn k)) ^String (name-fn k)))
          (catch SQLException _)))
      (valAt [_this k not-found]
        (try
          (if (number? k)
            (let [^Integer i (inc k)]
              (read-column-by-index (.getObject rs i) (:rsmeta @builder) i))
            (read-column-by-label (.getObject rs ^String (name-fn k)) ^String (name-fn k)))
          (catch SQLException _
            not-found)))
      ;; we support nth for array-based builders (i is primitive int here!):
      clojure.lang.Indexed
      (nth [_this i]
        (try
          (let [i (inc i)]
            (read-column-by-index (.getObject rs i) (:rsmeta @builder) i))
          (catch SQLException _)))
      (nth [_this i not-found]
        (try
          (let [i (inc i)]
            (read-column-by-index (.getObject rs i) (:rsmeta @builder) i))
          (catch SQLException _
            not-found)))
      clojure.lang.Seqable
      (seq [_this]
        (seq (row-builder @builder)))
      DatafiableRow
      (datafiable-row [_this connectable opts]
        ;; since we have to call these eagerly, we trap any exceptions so
        ;; that they can be thrown when the actual functions are called
        (let [row (try (.getRow rs) (catch Throwable t t))
              cols (try (:cols @builder) (catch Throwable t t))
              meta (try (d/datafy (.getMetaData rs)) (catch Throwable t t))]
          (vary-meta
            (row-builder @builder)
            assoc
            `core-p/datafy
            (navize-row connectable opts)
            `core-p/nav
            (navable-row connectable opts)
            `row-number
            (fn [_] (if (instance? Throwable row) (throw row) row))
            `column-names
            (fn [_] (if (instance? Throwable cols) (throw cols) cols))
            `metadata
            (fn [_] (if (instance? Throwable meta) (throw meta) meta)))))
      (toString [_]
        (try
          (str (row-builder @builder))
          (catch Throwable _
            "{row} from `plan` -- missing `map` or `reduce`?"))))))
;; Print the mapified row via its toString (which realizes the row or
;; produces a helpful placeholder); prefer these over the IPersistentMap
;; print implementations to avoid ambiguous dispatch.
(defmethod print-dup MapifiedResultSet [rs ^java.io.Writer w]
  (.write w (str rs)))
(prefer-method print-dup MapifiedResultSet clojure.lang.IPersistentMap)
(defmethod print-method MapifiedResultSet [rs ^java.io.Writer w]
  (.write w (str rs)))
(prefer-method print-method MapifiedResultSet clojure.lang.IPersistentMap)
;; A raw ResultSet is first wrapped by mapify-result-set, whose own
;; datafiable-row implementation realizes the current row:
(extend-protocol
  DatafiableRow
  ResultSet
  (datafiable-row [this connectable opts]
    (let [mapped (mapify-result-set this opts)]
      (datafiable-row mapped connectable opts))))
(extend-protocol
  DatafiableRow
  clojure.lang.IObj ; assume we can "navigate" anything that accepts metadata
  ;; in reality, this is going to be over-optimistic and will likely cause
  ;; `nav` to fail on attempts to navigate into result sets that are not
  ;; hash maps
  (datafiable-row [this connectable opts]
    (vary-meta this assoc
               `core-p/datafy (navize-row connectable opts)
               `core-p/nav (navable-row connectable opts))))
(defn datafiable-result-set
  "Given a ResultSet, a connectable, and an options hash map, return a fully
  realized, datafiable result set per the `:builder-fn` option passed in.
  If no `:builder-fn` option is provided, `as-maps` is used as the default.
  The connectable and the options can both be omitted. If connectable is
  omitted, `nil` is used and no foreign key navigation will be available
  for any datafied result. If you want to pass a hash map as the connectable,
  you must also pass an options hash map.
  This can be used to process regular result sets or metadata result sets."
  ([rs]
   (datafiable-result-set rs nil {}))
  ([rs conn-or-opts]
   ;; disambiguate the 2-arity: a hash map argument is options, anything
   ;; else is treated as the connectable:
   (datafiable-result-set
    rs
    (if (map? conn-or-opts) nil conn-or-opts)
    (if (map? conn-or-opts) conn-or-opts {})))
  ([^java.sql.ResultSet rs connectable opts]
   (let [builder-fn (get opts :builder-fn as-maps)
         builder    (builder-fn rs opts)]
     ;; iterate the ResultSet eagerly, realizing and datafying each row:
     (loop [rs' (->rs builder) more? (.next rs)]
       (if more?
         (recur (with-row builder rs'
                  (datafiable-row (row-builder builder) connectable opts))
                (.next rs))
         (rs! builder rs'))))))
;; make it available to next.jdbc.prepare (which cannot require this
;; namespace directly without creating a circular dependency):
(vreset! @#'prepare/d-r-s datafiable-result-set)
(defn- stmt->result-set
  "Given a `PreparedStatement` and options, execute it and return a `ResultSet`
  if possible.

  When execution yields no result set, fall back to generated keys if the
  caller asked for them (`:return-keys`); a driver that does not support
  generated keys yields nil."
  ^ResultSet
  [^PreparedStatement stmt opts]
  (if (.execute stmt)
    (.getResultSet stmt)
    (when (:return-keys opts)
      (try
        (.getGeneratedKeys stmt)
        (catch Exception _)))))
(defn- stmt->result-set-update-count
  "Given a connectable, a `Statement`, a flag indicating there might
  be a result set, and options, return a (datafiable) result set if possible
  (either from the statement or from generated keys). If no result set is
  available, return a 'fake' result set containing the update count.
  If no update count is available either, return nil."
  [connectable ^Statement stmt go opts]
  (if-let [^ResultSet
           rs (if go
                (.getResultSet stmt)
                (when (:return-keys opts)
                  (try
                    (.getGeneratedKeys stmt)
                    (catch Exception _))))]
    (datafiable-result-set rs connectable opts)
    ;; -1 from getUpdateCount means no update count is available:
    (let [n (.getUpdateCount stmt)]
      (when-not (= -1 n)
        [{:next.jdbc/update-count n}]))))
(defn- reduce-result-set
  "Given a `ResultSet`, a reducing function, an initial value, and options,
  perform a reduction of the result set. This is used internally by higher
  level `reduce-*` functions.

  The same mapified wrapper is passed to `f` for every row: the underlying
  ResultSet cursor advances beneath it, so `f` must consume values eagerly.
  Honors `reduced` for early termination."
  [^ResultSet rs f init opts]
  (let [rs-map (mapify-result-set rs opts)]
    (loop [init' init]
      (if (.next rs)
        (let [result (f init' rs-map)]
          (if (reduced? result)
            @result
            (recur result)))
        init'))))
(defn- reduce-stmt
  "Execute the `PreparedStatement`, attempt to get either its `ResultSet` or
  its generated keys (as a `ResultSet`), and reduce that using the supplied
  function and initial value.
  If the statement yields neither a `ResultSet` nor generated keys, return
  a hash map containing `:next.jdbc/update-count` and the number of rows
  updated, with the supplied function and initial value applied."
  [^PreparedStatement stmt f init opts]
  (if-let [rs (stmt->result-set stmt opts)]
    (reduce-result-set rs f init opts)
    ;; no result set: reduce over a single synthetic update-count 'row'
    (f init {:next.jdbc/update-count (.getUpdateCount stmt)})))
;; ForkJoinTask wrappers copied in from clojure.core.reducers to avoid
;; relying on private functionality that might possibly change over time
(defn- fjtask
  ;; wrap a no-arg callable as a ForkJoinTask
  [^Callable f]
  (ForkJoinTask/adapt f))
(defn- fjinvoke
  "For now, this still relies on clojure.core.reducers/pool which is
  public but undocumented."
  [f]
  ;; if already running on a ForkJoin worker thread just call f directly;
  ;; otherwise submit to the shared reducers pool and wait for the result
  (if (ForkJoinTask/inForkJoinPool)
    (f)
    (.invoke ^ForkJoinPool @r/pool ^ForkJoinTask (fjtask f))))
(defn- fjfork [task] (.fork ^ForkJoinTask task)) ; type-hinted fork delegation
(defn- fjjoin [task] (.join ^ForkJoinTask task)) ; blocking join on a forked task
(defn- fold-result-set
  "Given a `ResultSet`, a batch size, combining and reducing functions,
  a connectable, and options, perform a fold of the result set. This is
  used internally by higher level `fold-*` functions."
  [^ResultSet rs n combinef reducef connectable opts]
  (let [rs-map  (mapify-result-set rs opts)
        ;; a chunk is a ForkJoinTask that reduces one realized batch
        chunk   (fn [batch] (fjtask #(r/reduce reducef (combinef) batch)))
        ;; rows must be realized before batching: the ResultSet cursor moves
        ;; on, so the single mutable row view cannot be retained
        realize (fn [row] (datafiable-row row connectable opts))]
    (loop [batch [] task nil]
      (if (.next rs)
        (if (= n (count batch))
          ;; batch full: fork it and chain its join onto any prior task
          (recur [(realize rs-map)]
                 (let [t (fjfork (chunk batch))]
                   (if task
                     (fjfork
                      (fjtask #(combinef (fjjoin task)
                                         (fjjoin t))))
                     t)))
          (recur (conj batch (realize rs-map)) task))
        ;; end of result set: combine any final partial batch with the
        ;; accumulated task chain (or yield the identity if empty)
        (if (seq batch)
          (let [t (fjfork (chunk batch))]
            (fjinvoke
             #(combinef (if task (fjjoin task) (combinef))
                        (fjjoin t))))
          (if task
            (fjinvoke
             #(combinef (combinef) (fjjoin task)))
            (combinef)))))))
(defn reducible-result-set
  "Given a `ResultSet`, return an `IReduceInit` that can be reduced. An
  options hash map may be provided.
  You are responsible for ensuring the `Connection` for this `ResultSet`
  remains open until the reduction is complete!"
  ([rs]
   (reducible-result-set rs {}))
  ([rs opts]
   ;; lazy: nothing touches the ResultSet until reduce is actually called
   (reify
     clojure.lang.IReduceInit
     (reduce [_ f init]
       (reduce-result-set rs f init opts)))))
(defn foldable-result-set
  "Given a `ResultSet` and an optional connectable, return an `r/CollFold`
  that can be folded. An options hash map may be provided.
  You are responsible for ensuring the `Connection` for this `ResultSet`
  and the connectable both remain open until the fold is complete!
  If the connectable is omitted, no foreign key navigation would be
  available in any datafied result. If you want to pass a hash map as the
  connectable, you must also pass an options hash map."
  ([rs]
   (foldable-result-set rs nil {}))
  ;; 2-arity disambiguates: a hash map argument is options, not a connectable
  ([rs conn-or-opts]
   (foldable-result-set
    rs
    (if (map? conn-or-opts) nil conn-or-opts)
    (if (map? conn-or-opts) conn-or-opts {})))
  ([rs connectable opts]
   (reify
     r/CollFold
     (coll-fold [_ n combinef reducef]
       (fold-result-set rs n combinef reducef connectable opts)))))
(defn- fold-stmt
  "Execute the `PreparedStatement`, attempt to get either its `ResultSet` or
  its generated keys (as a `ResultSet`), and fold that using the supplied
  batch size, combining function, and reducing function.
  If the statement yields neither a `ResultSet` nor generated keys, produce
  a hash map containing `:next.jdbc/update-count` and the number of rows
  updated, and fold that as a single element collection."
  [^PreparedStatement stmt n combinef reducef connectable opts]
  (if-let [rs (stmt->result-set stmt opts)]
    (fold-result-set rs n combinef reducef connectable opts)
    ;; no result set: 'fold' a single synthetic update-count element
    (reducef (combinef) {:next.jdbc/update-count (.getUpdateCount stmt)})))
(defn- stmt-sql->result-set
  "Given a `Statement`, a SQL command, execute it and return a
  `ResultSet` if possible. We always attempt to return keys."
  ^ResultSet
  [^Statement stmt ^String sql]
  (if (.execute stmt sql)
    (.getResultSet stmt)
    ;; no result set: fall back to generated keys (plain Statement has no
    ;; :return-keys option) -- driver may not support this
    (try
      (.getGeneratedKeys stmt)
      (catch Exception _))))
(defn- reduce-stmt-sql
  "Execute the SQL command on the given `Statement`, attempt to get either
  its `ResultSet` or its generated keys (as a `ResultSet`), and reduce
  that using the supplied function and initial value.
  If the statement yields neither a `ResultSet` nor generated keys, return
  a hash map containing `:next.jdbc/update-count` and the number of rows
  updated, with the supplied function and initial value applied."
  [^Statement stmt sql f init opts]
  (if-let [rs (stmt-sql->result-set stmt sql)]
    (reduce-result-set rs f init opts)
    ;; no result set: reduce over a single synthetic update-count 'row'
    (f init {:next.jdbc/update-count (.getUpdateCount stmt)})))
(defn- fold-stmt-sql
  "Execute the SQL command on the given `Statement`, attempt to get either
  its `ResultSet` or its generated keys (as a `ResultSet`), and fold that
  using the supplied batch size, combining function, and reducing function.
  If the statement yields neither a `ResultSet` nor generated keys, produce
  a hash map containing `:next.jdbc/update-count` and the number of rows
  updated, and fold that as a single element collection."
  [^Statement stmt sql n combinef reducef connectable opts]
  (if-let [rs (stmt-sql->result-set stmt sql)]
    (fold-result-set rs n combinef reducef connectable opts)
    ;; no result set: 'fold' a single synthetic update-count element
    (reducef (combinef) {:next.jdbc/update-count (.getUpdateCount stmt)})))
(extend-protocol p/Executable
  ;; Connection: a statement is prepared per call; the caller owns
  ;; (and must close) the connection itself
  java.sql.Connection
  (-execute [this sql-params opts]
    (reify
      clojure.lang.IReduceInit
      (reduce [_ f init]
        (with-open [stmt (prepare/create this
                                         (first sql-params)
                                         (rest sql-params)
                                         opts)]
          (reduce-stmt stmt f init opts)))
      r/CollFold
      (coll-fold [_ n combinef reducef]
        (with-open [stmt (prepare/create this
                                         (first sql-params)
                                         (rest sql-params)
                                         opts)]
          (fold-stmt stmt n combinef reducef this opts)))
      ;; friendlier output if someone prints an unreduced plan
      (toString [_] "`IReduceInit` from `plan` -- missing reduction?")))
  (-execute-one [this sql-params opts]
    (with-open [stmt (prepare/create this
                                     (first sql-params)
                                     (rest sql-params)
                                     opts)]
      (if-let [rs (stmt->result-set stmt opts)]
        (let [builder-fn (get opts :builder-fn as-maps)
              builder    (builder-fn rs opts)]
          ;; only the first row is realized; nil if the result set is empty
          (when (.next rs)
            (datafiable-row (row-builder builder) this opts)))
        {:next.jdbc/update-count (.getUpdateCount stmt)})))
  (-execute-all [this sql-params opts]
    (with-open [stmt (prepare/create this
                                     (first sql-params)
                                     (rest sql-params)
                                     opts)]
      (if (:multi-rs opts)
        ;; walk all results/update counts until the driver reports no more
        (loop [go (.execute stmt) acc []]
          (if-let [rs (stmt->result-set-update-count this stmt go opts)]
            (recur (.getMoreResults stmt) (conj acc rs))
            acc))
        (if-let [rs (stmt->result-set stmt opts)]
          (datafiable-result-set rs this opts)
          [{:next.jdbc/update-count (.getUpdateCount stmt)}]))))
  ;; DataSource: a fresh connection (and statement) is opened per call
  ;; and closed when the operation completes
  javax.sql.DataSource
  (-execute [this sql-params opts]
    (reify
      clojure.lang.IReduceInit
      (reduce [_ f init]
        (with-open [con  (p/get-connection this opts)
                    stmt (prepare/create con
                                         (first sql-params)
                                         (rest sql-params)
                                         opts)]
          (reduce-stmt stmt f init opts)))
      r/CollFold
      (coll-fold [_ n combinef reducef]
        (with-open [con  (p/get-connection this opts)
                    stmt (prepare/create con
                                         (first sql-params)
                                         (rest sql-params)
                                         opts)]
          (fold-stmt stmt n combinef reducef this opts)))
      (toString [_] "`IReduceInit` from `plan` -- missing reduction?")))
  (-execute-one [this sql-params opts]
    (with-open [con  (p/get-connection this opts)
                stmt (prepare/create con
                                     (first sql-params)
                                     (rest sql-params)
                                     opts)]
      (if-let [rs (stmt->result-set stmt opts)]
        (let [builder-fn (get opts :builder-fn as-maps)
              builder    (builder-fn rs opts)]
          (when (.next rs)
            (datafiable-row (row-builder builder) this opts)))
        {:next.jdbc/update-count (.getUpdateCount stmt)})))
  (-execute-all [this sql-params opts]
    (with-open [con  (p/get-connection this opts)
                stmt (prepare/create con
                                     (first sql-params)
                                     (rest sql-params)
                                     opts)]
      (if (:multi-rs opts)
        (loop [go (.execute stmt) acc []]
          (if-let [rs (stmt->result-set-update-count this stmt go opts)]
            (recur (.getMoreResults stmt) (conj acc rs))
            acc))
        (if-let [rs (stmt->result-set stmt opts)]
          (datafiable-result-set rs this opts)
          [{:next.jdbc/update-count (.getUpdateCount stmt)}]))))
  java.sql.PreparedStatement
  ;; we can't tell if this PreparedStatement will return generated
  ;; keys so we pass a truthy value to at least attempt it if we
  ;; do not get a ResultSet back from the execute call
  (-execute [this _ opts]
    (reify
      clojure.lang.IReduceInit
      (reduce [_ f init]
        (reduce-stmt this f init (assoc opts :return-keys true)))
      r/CollFold
      (coll-fold [_ n combinef reducef]
        (fold-stmt this n combinef reducef (.getConnection this)
                   (assoc opts :return-keys true)))
      (toString [_] "`IReduceInit` from `plan` -- missing reduction?")))
  (-execute-one [this _ opts]
    (if-let [rs (stmt->result-set this (assoc opts :return-keys true))]
      (let [builder-fn (get opts :builder-fn as-maps)
            builder    (builder-fn rs opts)]
        (when (.next rs)
          (datafiable-row (row-builder builder)
                          (.getConnection this) opts)))
      {:next.jdbc/update-count (.getUpdateCount this)}))
  (-execute-all [this _ opts]
    (if (:multi-rs opts)
      (loop [go (.execute this) acc []]
        (if-let [rs (stmt->result-set-update-count
                     (.getConnection this) this go (assoc opts :return-keys true))]
          (recur (.getMoreResults this) (conj acc rs))
          acc))
      (if-let [rs (stmt->result-set this (assoc opts :return-keys true))]
        (datafiable-result-set rs (.getConnection this) opts)
        [{:next.jdbc/update-count (.getUpdateCount this)}])))
  ;; plain Statement: SQL string only -- parameters cannot be supplied
  java.sql.Statement
  (-execute [this sql-params opts]
    (assert (= 1 (count sql-params))
            "Parameters cannot be provided when executing a non-prepared Statement")
    (reify
      clojure.lang.IReduceInit
      (reduce [_ f init]
        (reduce-stmt-sql this (first sql-params) f init opts))
      r/CollFold
      (coll-fold [_ n combinef reducef]
        (fold-stmt-sql this (first sql-params) n combinef reducef
                       (.getConnection this) opts))
      (toString [_] "`IReduceInit` from `plan` -- missing reduction?")))
  (-execute-one [this sql-params opts]
    (assert (= 1 (count sql-params))
            "Parameters cannot be provided when executing a non-prepared Statement")
    (if-let [rs (stmt-sql->result-set this (first sql-params))]
      (let [builder-fn (get opts :builder-fn as-maps)
            builder    (builder-fn rs opts)]
        (when (.next rs)
          (datafiable-row (row-builder builder)
                          (.getConnection this) opts)))
      {:next.jdbc/update-count (.getUpdateCount this)}))
  (-execute-all [this sql-params opts]
    (assert (= 1 (count sql-params))
            "Parameters cannot be provided when executing a non-prepared Statement")
    (if (:multi-rs opts)
      (loop [go (.execute this (first sql-params)) acc []]
        (if-let [rs (stmt->result-set-update-count
                     (.getConnection this) this go (assoc opts :return-keys true))]
          (recur (.getMoreResults this) (conj acc rs))
          acc))
      (if-let [rs (stmt-sql->result-set this (first sql-params))]
        (datafiable-result-set rs (.getConnection this) opts)
        [{:next.jdbc/update-count (.getUpdateCount this)}])))
  ;; anything else: treat it as a db-spec and obtain a DataSource from it
  Object
  (-execute [this sql-params opts]
    (p/-execute (p/get-datasource this) sql-params opts))
  (-execute-one [this sql-params opts]
    (p/-execute-one (p/get-datasource this) sql-params opts))
  (-execute-all [this sql-params opts]
    (p/-execute-all (p/get-datasource this) sql-params opts)))
(defn- default-schema
  "The default schema lookup rule for column names.
  A column named `<table><fk-suffix>` or `<table>_<fk-suffix>` is treated
  as a foreign key into `<table>`, pointing at that table's (primary) key.
  By default both the suffix and the key are `id`. That can be overridden
  via the `:schema-opts` hash map in the options:
  * `:fk-suffix` -- the suffix for foreign key columns, default `id`
  * `:pk` -- the (primary) key column name, default `id`
  * `:pk-fn` -- a function of the table name and the value of `:pk` that
    yields the (primary) key column name, default `(constantly <pk>)`.
  Returns `[table-kw pk-kw]` when `col` matches the convention, else nil."
  [opts col]
  (let [{:keys [schema-opts]} opts
        fk-suffix (get schema-opts :fk-suffix "id")
        pk        (get schema-opts :pk "id")
        pk-fn     (get schema-opts :pk-fn (constantly (name pk)))
        ;; non-greedy (.+?) so an optional -/_ separator is matched by
        ;; [-_]? rather than being swallowed into the table name
        fk-pat    (re-pattern (str "(?i)^(.+?)[-_]?"
                                   (name fk-suffix)
                                   "$"))
        [_ table] (re-find fk-pat (name col))]
    (when table
      [(keyword table) (keyword (pk-fn table pk))])))
(comment
(default-schema {} :userstatusid)
(default-schema {} :userstatus_id)
(default-schema {} :user_statusid)
(default-schema {:schema-opts {:fk-suffix "did"}} :user_id)
(default-schema {:schema-opts {:fk-suffix "did"}} :user_did)
(default-schema {:schema-opts {:fk-suffix "did"}} :user-did)
(default-schema {:schema-opts {:fk-suffix "(did|id)"}} :user_id)
(default-schema {:schema-opts {:fk-suffix "(did|id)"}} :user_did)
(default-schema {:schema-opts {:fk-suffix "(did|id)"}} :user-did)
(default-schema {:schema-opts {:fk-suffix "(did|id)"
:pk :did}} :user_did)
(default-schema {:schema-opts {:fk-suffix "(did|id)"
:pk :did
:pk-fn (fn [table pk]
(if (= "user" table)
"id"
pk))}}
:user_did)
(default-schema {:schema-opts {:fk-suffix "(did|id)"
:pk :did
:pk-fn (fn [table pk]
(if (= "user" table)
"id"
pk))}}
:user-did)
(default-schema {:schema-opts {:fk-suffix "(did|id)"
:pk :did
:pk-fn (fn [table pk]
(if (= "user" table)
"id"
pk))}}
:book_did)
)
(defn- expand-schema
  "Normalize a (possibly nil) schema entry to a `[table fk cardinality]`
  triple. Accepted input shapes:
  * `[table fk]`           => cardinality defaults to `:one`
  * `[table fk cardinality]` -- returned unchanged
  * `:table/fk`            => `[:table :fk :one]`
  * `[:table/fk]`          => `[:table :fk :many]`
  Returns nil for a nil entry; throws `ex-info` for anything unrecognized."
  [k entry]
  (when entry
    (or (cond
          (keyword? entry)
          [(keyword (namespace entry)) (keyword (name entry)) :one]
          (coll? entry)
          (let [[table fk cardinality] entry]
            (cond
              (and table fk cardinality) entry
              (and table fk)             [table fk :one]
              (keyword? table)
              [(keyword (namespace table)) (keyword (name table)) :many])))
        (throw (ex-info (str "Invalid schema entry for: " (name k))
                        {:entry entry})))))
(comment
(expand-schema :user/statusid nil)
(expand-schema :user/statusid :status/id)
(expand-schema :user/statusid [:status :id])
(expand-schema :user/email [:deliverability :email :many])
(expand-schema :user/email [:deliverability/email]))
(defn- navize-row
  "Given a connectable object, return a function that knows how to turn a row
  into a `nav`igable object.
  See navable-row below for more details."
  [connectable opts]
  ;; attach the nav implementation as metadata so the row data is unchanged
  ;; (navable-row is defined below; presumably declared earlier in this file)
  (fn [row]
    (vary-meta row assoc `core-p/nav (navable-row connectable opts))))
(defn- navable-row
  "Given a connectable object, return a function that knows how to `nav`
  into a row.
  A `:schema` option can provide a map from qualified column names
  (`:<table>/<column>`) to tuples that indicate for which table they are a
  foreign key, the name of the key within that table, and (optionality) the
  cardinality of that relationship (`:many`, `:one`).
  If no `:schema` item is provided for a column, the convention of
  `<table>id` or `<table>_id` is used, and the assumption is that such
  columns are foreign keys in the `<table>` portion of their name, the key
  is called `id`, and the cardinality is `:one`.
  That convention can in turn be modified via the `:schema-opts` option.
  Rows are looked up using `-execute-all` or `-execute-one`, and the `:table-fn`
  option, if provided, is applied to the assumed table name and `:column-fn` if
  provided to the assumed foreign key column name."
  [connectable opts]
  ;; returned fn matches the clojure.core.protocols/nav signature [coll k v]
  (fn [_ k v]
    (try
      (let [[table fk cardinality]
            (expand-schema k (or (get-in opts [:schema k])
                                 (default-schema opts k)))]
        (if (and fk connectable)
          (let [table-fn  (:table-fn opts identity)
                column-fn (:column-fn opts identity)
                ;; :many yields a vector of rows; :one a single row
                exec-fn!  (if (= :many cardinality)
                            p/-execute-all
                            p/-execute-one)]
            ;; NOTE(review): table/fk names are interpolated into the SQL
            ;; string; they come from the :schema option or from column names
            ;; (via :table-fn/:column-fn), not from user data -- the value `v`
            ;; itself is passed as a bound parameter
            (exec-fn! connectable
                      [(str "SELECT * FROM "
                            (table-fn (name table))
                            " WHERE "
                            (column-fn (name fk))
                            " = ?")
                       v]
                      opts))
          v))
      (catch Exception _
        ;; assume an exception means we just cannot
        ;; navigate anywhere, so return just the value
        v))))
next-jdbc-1.3.955/src/next/jdbc/specs.clj 0000664 0000000 0000000 00000026151 14700603111 0020030 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2023 Sean Corfield, all rights reserved
(ns next.jdbc.specs
"Specs for the core API of next.jdbc.
The functions from `next.jdbc`, `next.jdbc.sql`, and `next.jdbc.prepare`
have specs here.
Just `:args` are spec'd. These specs are intended to aid development
with `next.jdbc` by catching simple errors in calling the library.
The `connectable` argument is currently just `any?` but both
`get-datasource` and `get-connection` have stricter specs.
In addition, there is an `instrument` function that provides a simple
way to instrument all of the `next.jdbc` functions, and `unstrument`
to undo that."
(:require [clojure.spec.alpha :as s]
[clojure.spec.test.alpha :as st]
[next.jdbc :as jdbc]
[next.jdbc.connection :as connection]
[next.jdbc.prepare :as prepare]
[next.jdbc.protocols :as p]
[next.jdbc.sql :as sql])
(:import (java.sql Connection PreparedStatement Statement)
(javax.sql DataSource)))
(set! *warn-on-reflection* true)

;; specs for the individual pieces of a db-spec hash map:
(s/def ::dbtype string?)
(s/def ::dbname string?)
(s/def ::dbname-separator string?)
(s/def ::classname string?)
(s/def ::user string?)
(s/def ::password string?)
;; :none means "do not include a host segment in the JDBC URL"
(s/def ::host (s/or :name string?
                    :none #{:none}))
(s/def ::host-prefix string?)
(s/def ::port (s/or :port pos-int?
                    :none #{:none}))
(s/def ::db-spec-map (s/keys :req-un [::dbtype ::dbname]
                             :opt-un [::classname
                                      ::user ::password
                                      ::host ::port
                                      ::dbname-separator
                                      ::host-prefix]))
(defn jdbc-url-format?
  "JDBC URLs must begin with `jdbc:` followed by the `dbtype` and
  a second colon. Note: `clojure.java.jdbc` incorrectly allowed
  `jdbc:` to be omitted at the beginning of a JDBC URL.
  Returns the matched prefix (truthy) or nil, so this can be used
  directly as a spec predicate."
  [url]
  (re-find #"^jdbc:[^:]+:" url))
(s/def ::jdbcUrl (s/and string? jdbc-url-format?))

(comment
  (s/explain-data ::jdbcUrl "jdbc:somedb://some-host/dbname")
  (s/explain-data ::jdbcUrl "somedb://some-host/dbname"))

(s/def ::jdbc-url-map (s/keys :req-un [::jdbcUrl]))

;; JDBC object specs:
(s/def ::connection #(instance? Connection %))
(s/def ::datasource #(instance? DataSource %))
(s/def ::prepared-statement #(instance? PreparedStatement %))
(s/def ::statement #(instance? Statement %))

(s/def ::db-spec (s/or :db-spec ::db-spec-map
                       :jdbc-url ::jdbc-url-map
                       :string ::jdbcUrl
                       :ds ::datasource))
(s/def ::db-spec-or-jdbc (s/or :db-spec ::db-spec-map
                               :jdbc-url ::jdbc-url-map))
(s/def ::proto-connectable (s/or :db-spec ::db-spec
                                 :connectable #(satisfies? p/Connectable %)
                                 :sourceable #(satisfies? p/Sourceable %)))
;; deliberately loose: almost anything can be a connectable at this level
(s/def ::connectable any?)
(s/def ::key-map (s/map-of keyword? any?))
(s/def ::example-map (s/map-of keyword? any? :min-count 1))
;; can be a simple column name (keyword) or a pair of something and as alias
;; and that something can be a simple column name (keyword) or an arbitrary
;; expression (string) where we assume you know what you're doing
(s/def ::column-spec (s/or :column keyword?
                           :alias (s/and vector?
                                         (s/cat :expr (s/or :col keyword?
                                                            :str string?)
                                                :column keyword?))))
(s/def ::columns (s/coll-of ::column-spec :kind vector?))
(s/def ::order-by-col (s/or :col keyword?
                            :dir (s/cat :col keyword?
                                        :dir #{:asc :desc})))
(s/def ::order-by (s/coll-of ::order-by-col :kind vector? :min-count 1))
(s/def ::opts-map (s/and (s/map-of keyword? any?)
                         (s/keys :opt-un [::columns ::order-by])))
(s/def ::transactable any?)
;; a SQL string followed by zero or more positional parameters
(s/def ::sql-params (s/and vector?
                           (s/cat :sql string?
                                  :params (s/* any?))))
(s/def ::params (s/coll-of any? :kind sequential?))
(s/def ::batch-size pos-int?)
(s/def ::large boolean?)
(s/def ::batch-opts (s/keys :opt-un [::batch-size ::large]))
;; function specs (`:args` only) for next.jdbc:
(s/fdef jdbc/get-datasource
  :args (s/cat :spec ::proto-connectable))
(s/fdef jdbc/get-connection
  :args (s/cat :spec ::proto-connectable
               :opts (s/? ::opts-map)))
(s/fdef jdbc/prepare
  :args (s/cat :connection ::connection
               :sql-params ::sql-params
               :opts (s/? ::opts-map)))
(s/fdef jdbc/plan
  :args (s/alt :prepared (s/cat :stmt ::statement)
               :sql (s/cat :connectable ::connectable
                           :sql-params (s/nilable ::sql-params)
                           :opts (s/? ::opts-map))))
(s/fdef jdbc/execute!
  :args (s/alt :prepared (s/cat :stmt ::statement)
               :sql (s/cat :connectable ::connectable
                           :sql-params (s/nilable ::sql-params)
                           :opts (s/? ::opts-map))))
(s/fdef jdbc/execute-one!
  :args (s/alt :prepared (s/cat :stmt ::statement)
               :sql (s/cat :connectable ::connectable
                           :sql-params (s/nilable ::sql-params)
                           :opts (s/? ::opts-map))))
(s/fdef jdbc/execute-batch!
  :args (s/alt :prepared (s/cat :ps ::prepared-statement
                                :param-groups (s/coll-of ::params :kind sequential?)
                                :opts (s/? ::batch-opts))
               :sql (s/cat :connectable ::connectable
                           :sql string?
                           :param-groups (s/coll-of ::params :kind sequential?)
                           :opts ::batch-opts)))
(s/fdef jdbc/transact
  :args (s/cat :transactable ::transactable
               :f fn?
               :opts (s/? ::opts-map)))
(s/fdef jdbc/with-options
  :args (s/cat :connectable ::connectable
               :opts ::opts-map))
;; macro specs -- checked automatically at macroexpansion time:
(s/fdef jdbc/with-transaction
  :args (s/cat :binding (s/and vector?
                               (s/cat :sym simple-symbol?
                                      :transactable ::transactable
                                      :opts (s/? any?)))
               :body (s/* any?)))
(s/fdef jdbc/with-transaction+options
  :args (s/cat :binding (s/and vector?
                               (s/cat :sym simple-symbol?
                                      :transactable ::transactable
                                      :opts (s/? any?)))
               :body (s/* any?)))
(s/fdef jdbc/on-connection
  :args (s/cat :binding (s/and vector?
                               (s/cat :sym simple-symbol?
                                      :connectable ::connectable))
               :body (s/* any?)))
(s/fdef jdbc/on-connection+options
  :args (s/cat :binding (s/and vector?
                               (s/cat :sym simple-symbol?
                                      :connectable ::connectable))
               :body (s/* any?)))
(s/fdef connection/->pool
  :args (s/cat :clazz #(instance? Class %)
               :db-spec ::db-spec-or-jdbc))
(s/fdef connection/component
  :args (s/cat :clazz #(instance? Class %)
               :db-spec ::db-spec-or-jdbc
               :close-fn (s/? fn?)))
(s/fdef prepare/set-parameters
  :args (s/cat :ps ::prepared-statement
               :params ::params))
(s/fdef prepare/statement
  :args (s/cat :connection ::connection
               :opts (s/? ::opts-map)))
(s/fdef sql/insert!
  :args (s/cat :connectable ::connectable
               :table keyword?
               :key-map ::key-map
               :opts (s/? ::opts-map)))
(s/fdef sql/insert-multi!
  :args
  (s/or
   ;; cols + rows: every row must have the same count as cols
   :with-rows-and-columns
   (s/and (s/cat :connectable ::connectable
                 :table keyword?
                 :cols (s/coll-of keyword? :kind sequential?)
                 :rows (s/coll-of (s/coll-of any?
                                             :kind sequential?
                                             :min-count 1)
                                  :kind sequential?)
                 :opts (s/? ::opts-map))
          #(apply = (count (:cols %))
                  (map count (:rows %))))
   :with-hash-maps
   (s/cat :connectable ::connectable
          :table keyword?
          :hash-maps (s/coll-of map? :kind sequential?)
          :opts (s/? ::opts-map))))
(s/fdef sql/query
  :args (s/cat :connectable ::connectable
               :sql-params ::sql-params
               :opts (s/? ::opts-map)))
(s/fdef sql/find-by-keys
  :args (s/cat :connectable ::connectable
               :table keyword?
               :key-map (s/or :example ::example-map
                              :where ::sql-params
                              :all #{:all})
               :opts (s/? ::opts-map)))
(s/fdef sql/aggregate-by-keys
  :args (s/cat :connectable ::connectable
               :table keyword?
               :aggregate string?
               :key-map (s/or :example ::example-map
                              :where ::sql-params
                              :all #{:all})
               :opts (s/? ::opts-map)))
(s/fdef sql/get-by-id
  :args (s/alt :with-id (s/cat :connectable ::connectable
                               :table keyword?
                               :pk any?
                               :opts (s/? ::opts-map))
               :pk-name (s/cat :connectable ::connectable
                               :table keyword?
                               :pk any?
                               :pk-name keyword?
                               :opts ::opts-map)))
(s/fdef sql/update!
  :args (s/cat :connectable ::connectable
               :table keyword?
               :key-map ::key-map
               :where-params (s/or :example ::example-map
                                   :where ::sql-params)
               :opts (s/? ::opts-map)))
(s/fdef sql/delete!
  :args (s/cat :connectable ::connectable
               :table keyword?
               :where-params (s/or :example ::example-map
                                   :where ::sql-params)
               :opts (s/? ::opts-map)))
;; the symbols that instrument/unstrument operate on; keep this list in
;; sync with the s/fdef forms above
(def ^:private fns-with-specs
  [`jdbc/get-datasource
   `jdbc/get-connection
   `jdbc/prepare
   `jdbc/plan
   `jdbc/execute!
   `jdbc/execute-one!
   `jdbc/execute-batch!
   `jdbc/transact
   `jdbc/with-transaction
   `jdbc/with-options
   `connection/->pool
   `connection/component
   `prepare/set-parameters
   `prepare/statement
   `sql/insert!
   `sql/insert-multi!
   `sql/query
   `sql/find-by-keys
   ;; aggregate-by-keys has an fdef above but was previously missing
   ;; from this list, so its spec was never instrumented:
   `sql/aggregate-by-keys
   `sql/get-by-id
   `sql/update!
   `sql/delete!])
(defn instrument
  "Turn on spec checking for all next.jdbc functions that have specs."
  []
  (st/instrument fns-with-specs))
(defn unstrument
  "Turn off spec checking for all next.jdbc functions that have specs."
  []
  (st/unstrument fns-with-specs))
next-jdbc-1.3.955/src/next/jdbc/sql.clj 0000664 0000000 0000000 00000024164 14700603111 0017514 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2024 Sean Corfield, all rights reserved
(ns next.jdbc.sql
"Some utility functions that make common operations easier by
providing some syntactic sugar over `execute!`/`execute-one!`.
This is intended to provide a minimal level of parity with
`clojure.java.jdbc` (`insert!`, `insert-multi!`, `query`, `find-by-keys`,
`get-by-id`, `update!`, and `delete!`).
For anything more complex, use a library like HoneySQL
https://github.com/seancorfield/honeysql to generate SQL + parameters.
The following options are supported:
* `:table-fn` -- specify a function used to convert table names (strings)
to SQL entity names -- see the `next.jdbc.quoted` namespace for the
most common quoting strategy functions,
* `:column-fn` -- specify a function used to convert column names (strings)
to SQL entity names -- see the `next.jdbc.quoted` namespace for the
most common quoting strategy functions.
In addition, `find-by-keys` supports `:order-by` to add an `ORDER BY`
clause to the generated SQL."
(:require [clojure.string :as str]
[next.jdbc :refer [execute! execute-batch! execute-one!]]
[next.jdbc.sql.builder
:refer [for-delete for-insert for-insert-multi for-query
for-update]]))
(set! *warn-on-reflection* true) ; surface unhinted interop in this namespace
(defn insert!
  "Syntactic sugar over `execute-one!` for inserting a single hash map.
  Given a connectable object, a table name, and a data hash map, inserts
  that data as one row in the database and attempts to return a map of
  generated keys. Any options attached to the connectable (via
  `with-options`) are merged beneath the supplied `opts`."
  ([connectable table key-map]
   (insert! connectable table key-map {}))
  ([connectable table key-map opts]
   (let [all-opts (merge (:options connectable) opts)]
     (execute-one! connectable
                   (for-insert table key-map all-opts)
                   (merge {:return-keys true} all-opts)))))
(defn insert-multi!
  "Syntactic sugar over `execute!` or `execute-batch!` to make inserting
  columns/rows easier.
  Given a connectable object, a table name, a sequence of column names, and
  a vector of rows of data (vectors of column values), inserts the data as
  multiple rows in the database and attempts to return a vector of maps of
  generated keys.
  Given a connectable object, a table name, a sequence of hash maps of data,
  which all have the same set of keys, inserts the data as multiple rows in
  the database and attempts to return a vector of maps of generated keys.
  If called with `:batch` true will call `execute-batch!` - see its documentation
  for situations in which the generated keys may or may not be returned as well as
  additional options that can be passed.
  Note: without `:batch` this expands to a single SQL statement with placeholders for
  every value being inserted -- for large sets of rows, this may exceed the limits
  on SQL string size and/or number of parameters for your JDBC driver or your
  database!"
  {:arglists '([connectable table hash-maps]
               [connectable table hash-maps opts]
               [connectable table cols rows]
               [connectable table cols rows opts])}
  ([connectable table hash-maps]
   (insert-multi! connectable table hash-maps {}))
  ;; middle arity disambiguates at runtime: hash maps (+ opts) vs cols + rows
  ([connectable table hash-maps-or-cols opts-or-rows]
   (if (map? (first hash-maps-or-cols))
     (let [cols (keys (first hash-maps-or-cols))
           ->row (fn ->row [m]
                   (map #(get m %) cols))]
       ;; all hash maps must share one key set so column order lines up
       (when-not (apply = (map (comp set keys) hash-maps-or-cols))
         (throw (IllegalArgumentException.
                 "insert-multi! hash maps must all have the same keys")))
       (insert-multi! connectable table cols (map ->row hash-maps-or-cols) opts-or-rows))
     (if (map? opts-or-rows)
       (insert-multi! connectable table hash-maps-or-cols [] opts-or-rows)
       (insert-multi! connectable table hash-maps-or-cols opts-or-rows {}))))
  ([connectable table cols rows opts]
   (if (seq rows)
     (let [opts   (merge (:options connectable) opts)
           batch? (:batch opts)]
       (if batch?
         ;; batch mode: one parameterized statement executed per row group
         (let [[sql & param-groups] (for-insert-multi table cols rows opts)]
           (execute-batch! connectable sql param-groups
                           (merge {:return-keys true :return-generated-keys true} opts)))
         ;; non-batch: a single multi-row INSERT statement
         (execute! connectable
                   (for-insert-multi table cols rows opts)
                   (merge {:return-keys true} opts))))
     ;; no rows: nothing to insert, return an empty result vector
     [])))
(defn query
  "A `query` alias for `execute!`.
  Given a connectable object and a vector of SQL and its parameters,
  returns a vector of hash maps of the rows that match. Options attached
  to the connectable (via `with-options`) are merged beneath `opts`."
  ([connectable sql-params]
   (query connectable sql-params {}))
  ([connectable sql-params opts]
   (execute! connectable
             sql-params
             (merge (:options connectable) opts))))
(defn find-by-keys
  "Syntactic sugar over `execute!` to make certain common queries easier.
  Given a connectable object, a table name, and either a hash map of
  columns and values to search on or a vector of a SQL where clause and
  parameters, returns a vector of hash maps of rows that match.
  If `:all` is passed instead of a hash map or vector -- the query will
  select all rows in the table, subject to any pagination options below.
  If `:columns` is passed, only that specified subset of columns will be
  returned in each row (otherwise all columns are selected).
  If the `:order-by` option is present, add an `ORDER BY` clause. `:order-by`
  should be a vector of column names or pairs of column name / direction,
  which can be `:asc` or `:desc`.
  If the `:top` option is present, the SQL Server `SELECT TOP ?` syntax
  is used and the value of the option is inserted as an additional parameter.
  If the `:limit` option is present, the MySQL `LIMIT ? OFFSET ?` syntax
  is used (using the `:offset` option if present, else `OFFSET ?` is omitted).
  PostgreSQL also supports this syntax.
  If the `:offset` option is present (without `:limit`), the standard
  `OFFSET ? ROWS FETCH NEXT ? ROWS ONLY` syntax is used (using the `:fetch`
  option if present, else `FETCH...` is omitted)."
  ([connectable table key-map]
   (find-by-keys connectable table key-map {}))
  ([connectable table key-map opts]
   ;; SQL generation (including all the pagination options above) is
   ;; delegated to next.jdbc.sql.builder/for-query
   (let [opts (merge (:options connectable) opts)]
     (execute! connectable (for-query table key-map opts) opts))))
(defn aggregate-by-keys
  "A wrapper over `find-by-keys` that additionally takes an aggregate SQL
  expression (a string), and returns just a single result: the value of
  that aggregate for the matching rows.
  Accepts all the same options as `find-by-keys` except `:columns` since that
  is used internally by this wrapper to pass the aggregate expression in."
  ([connectable table aggregate key-map]
   (aggregate-by-keys connectable table aggregate key-map {}))
  ([connectable table aggregate key-map opts]
   (let [opts (merge (:options connectable) opts)
         _
         (when-not (string? aggregate)
           (throw (IllegalArgumentException.
                   "aggregate-by-keys requires a string aggregate expression")))
         _
         (when (:columns opts)
           (throw (IllegalArgumentException.
                   "aggregate-by-keys does not support the :columns option")))
         ;; this should be unique enough as an alias to never clash with
         ;; a real column name in anyone's tables -- in addition it is
         ;; stable for a given aggregate expression so it should allow
         ;; for query caching in the JDBC driver:
         ;; (we use abs to avoid negative hash codes which would produce
         ;; a hyphen in the alias name which is not valid in SQL identifiers;
         ;; the hash is widened to long first because Math/abs of the int
         ;; Integer/MIN_VALUE is still negative)
         total-name (str "next_jdbc_aggregate_"
                         (Math/abs (long (.hashCode ^String aggregate))))
         total-column (keyword total-name)
         ;; because some databases return uppercase column names:
         total-col-u (keyword (str/upper-case total-name))]
     (-> (find-by-keys connectable table key-map
                       (assoc opts :columns [[aggregate total-column]]))
         (first)
         (as-> row (or (get row total-column) (get row total-col-u)))))))
(defn get-by-id
  "Syntactic sugar over `execute-one!` for fetching a single row by its
  primary key value.
  By default the primary key column is assumed to be `id`; the
  five-argument arity lets you name a different key column.
  As with `find-by-keys`, `:columns` may be supplied to restrict the
  columns returned (and `:order-by`, `:top`, `:limit`, `:offset`, and
  `:fetch` are technically accepted too, although they are of limited
  use for a single-row lookup)."
  ([connectable table pk]
   (get-by-id connectable table pk :id {}))
  ([connectable table pk opts]
   (get-by-id connectable table pk :id opts))
  ([connectable table pk pk-name opts]
   (let [all-opts (merge (:options connectable) opts)]
     (execute-one! connectable
                   (for-query table {pk-name pk} all-opts)
                   all-opts))))
(defn update!
  "Syntactic sugar over `execute-one!` for common updates.
  Given a connectable object, a table name, a hash map of columns and
  values to set, and either a hash map of columns/values to match on or
  a vector of a SQL where clause and parameters, performs an update on
  the table."
  ([connectable table key-map where-params]
   (update! connectable table key-map where-params {}))
  ([connectable table key-map where-params opts]
   (let [all-opts (merge (:options connectable) opts)]
     (execute-one! connectable
                   (for-update table key-map where-params all-opts)
                   all-opts))))
(defn delete!
  "Syntactic sugar over `execute-one!` for common deletes.
  Given a connectable object, a table name, and either a hash map of
  columns/values to match on or a vector of a SQL where clause and
  parameters, performs a delete on the table."
  ([connectable table where-params]
   (delete! connectable table where-params {}))
  ([connectable table where-params opts]
   (let [all-opts (merge (:options connectable) opts)]
     (execute-one! connectable
                   (for-delete table where-params all-opts)
                   all-opts))))
next-jdbc-1.3.955/src/next/jdbc/sql/ 0000775 0000000 0000000 00000000000 14700603111 0017013 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/src/next/jdbc/sql/builder.clj 0000664 0000000 0000000 00000027467 14700603111 0021153 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2024 Sean Corfield, all rights reserved
(ns next.jdbc.sql.builder
"Some utility functions for building SQL strings.
These were originally private functions in `next.jdbc.sql` but
they may prove useful to developers who want to write their own
'SQL sugar' functions, such as a database-specific `upsert!` etc."
(:require [clojure.string :as str]))
(set! *warn-on-reflection* true) ; surface unhinted interop in this namespace
(defn as-?
  "Given a hash map of column names and values, or a vector of column names,
  return a comma-separated string of `?` placeholders, one per entry.
  The second (options) argument is accepted for interface consistency
  with the other builder helpers and is ignored."
  [key-map _]
  (str/join ", " (map (constantly "?") key-map)))
;; similar to honeysql, by default we disallow suspicious
;; characters in table and column names when building SQL:
;; (rebind to true only when you fully trust all entity names)
(def ^:private ^:dynamic *allow-suspicious-entities* false)
(defn qualified-name
  "Like `clojure.core/name` but preserves the qualifier, if any.
  Intended for use with `:name-fn`, instead of the default `name`.
  For a keyword, only the leading colon is dropped (so `:a/b` yields
  \"a/b\"); any other value is passed through `str`."
  [k]
  (if (keyword? k)
    (subs (str k) 1)
    (str k)))
(defn- safe-name
  "Apply `name-fn` to `k` and return the resulting entity string, throwing
  an `ex-info` if it contains a character we consider 'suspicious' in a
  table or column name (currently just `;`) -- unless
  `*allow-suspicious-entities*` has been rebound to true."
  [k name-fn]
  (let [entity     (name-fn k)
        suspicious #";"]
    (when (and (not *allow-suspicious-entities*)
               (re-find suspicious entity))
      (throw (ex-info (str "suspicious character found in entity: " entity)
                      {:disallowed suspicious})))
    entity))
(defn as-cols
  "Given a sequence of raw column names, return a string of all the
  formatted column names.
  If a raw column name is a keyword, apply `:column-fn` to its name,
  from the options if present.
  If a raw column name is a vector pair, treat it as an expression with
  an alias. If the first item is a keyword, apply `:column-fn` to its
  name, else accept it as-is. The second item should be a keyword and
  that will have `:column-fn` applied to its name.
  This allows columns to be specified as simple names, e.g., `:foo`,
  as simple aliases, e.g., `[:foo :bar]`, or as expressions with an
  alias, e.g., `[\"count(*)\" :total]`."
  [cols opts]
  (let [col-fn  (:column-fn opts identity)
        name-fn (:name-fn opts name)]
    (str/join ", " (map (fn [raw]
                          (if (vector? raw)
                            (if (keyword? (first raw))
                              ;; [keyword alias] -- both sides sanitized
                              (str (col-fn (safe-name (first raw) name-fn))
                                   " AS "
                                   (col-fn (safe-name (second raw) name-fn)))
                              ;; [string-expr alias] -- expression passed
                              ;; through as-is ("we assume you know what
                              ;; you're doing"); only the alias is sanitized
                              (str (first raw)
                                   " AS "
                                   (col-fn (safe-name (second raw) name-fn))))
                            (col-fn (safe-name raw name-fn))))
                        cols))))
(defn as-keys
  "Given a hash map of column names and values, return a string of all the
  column names.

  Applies any `:column-fn` supplied in the options."
  [key-map opts]
  ;; delegate to as-cols for all the formatting rules:
  (-> key-map (keys) (as-cols opts)))
(defn by-keys
  "Given a hash map of column names and values and a clause type
  (`:set`, `:where`), return a vector of a SQL clause and its parameters.

  Applies any `:column-fn` supplied in the options."
  [key-map clause opts]
  (let [entity-fn (:column-fn opts identity)
        name-fn (:name-fn opts name)
        [where params] (reduce-kv (fn [[conds params] k v]
                                    (let [e (entity-fn (safe-name k name-fn))]
                                      ;; a nil value in a WHERE clause must be
                                      ;; rendered as IS NULL (with no parameter),
                                      ;; never as = NULL:
                                      (if (and (= :where clause) (nil? v))
                                        [(conj conds (str e " IS NULL")) params]
                                        [(conj conds (str e " = ?")) (conj params v)])))
                                  [[] []]
                                  key-map)]
    (assert (seq where) "key-map may not be empty")
    ;; WHERE conditions are AND'ed together; SET pairs are comma-separated:
    (into [(str (str/upper-case (safe-name clause name-fn)) " "
                (str/join (if (= :where clause) " AND " ", ") where))]
          params)))
(defn for-delete
  "Given a table name and either a hash map of column names and values or a
  vector of SQL (where clause) and its parameters, return a vector of the
  full `DELETE` SQL string and its parameters.

  Applies any `:table-fn` / `:column-fn` supplied in the options.

  If `:suffix` is provided in `opts`, that string is appended to the
  `DELETE ...` statement."
  [table where-params opts]
  (let [entity-fn (:table-fn opts identity)
        name-fn (:name-fn opts name)
        ;; a map becomes a generated WHERE clause; a vector is assumed to
        ;; be a [sql & params] fragment, to which WHERE is prepended:
        where-params (if (map? where-params)
                       (by-keys where-params :where opts)
                       (into [(str "WHERE " (first where-params))]
                             (rest where-params)))]
    (into [(str "DELETE FROM " (entity-fn (safe-name table name-fn))
                " " (first where-params)
                (when-let [suffix (:suffix opts)]
                  (str " " suffix)))]
          (rest where-params))))
(defn for-insert
  "Given a table name and a hash map of column names and their values,
  return a vector of the full `INSERT` SQL string and its parameters.

  Applies any `:table-fn` / `:column-fn` supplied in the options.

  If `:suffix` is provided in `opts`, that string is appended to the
  `INSERT ...` statement."
  [table key-map opts]
  (assert (seq key-map) "key-map may not be empty")
  (let [table-fn (:table-fn opts identity)
        name-fn  (:name-fn opts name)
        columns  (as-keys key-map opts)
        holders  (as-? key-map opts)
        sql      (str "INSERT INTO " (table-fn (safe-name table name-fn))
                      " (" columns ")"
                      " VALUES (" holders ")"
                      (when-let [suffix (:suffix opts)]
                        (str " " suffix)))]
    ;; the parameters are the column values, in key-map order:
    (into [sql] (vals key-map))))
(defn for-insert-multi
  "Given a table name, a vector of column names, and a vector of row values
  (each row is a vector of its values), return a vector of the full `INSERT`
  SQL string and its parameters.

  Applies any `:table-fn` / `:column-fn` supplied in the options.

  If `:batch` is set to `true` in `opts` the INSERT statement will be prepared
  using a single set of placeholders and remaining parameters in the vector will
  be grouped at the row level.

  If `:suffix` is provided in `opts`, that string is appended to the
  `INSERT ...` statement."
  [table cols rows opts]
  (assert (apply = (count cols) (map count rows))
          "column counts are not consistent across cols and rows")
  ;; to avoid generating bad SQL
  (assert (seq cols) "cols may not be empty")
  (assert (seq rows) "rows may not be empty")
  (let [table-fn (:table-fn opts identity)
        name-fn (:name-fn opts name)
        batch? (:batch opts)
        params (as-cols cols opts)
        places (as-? (first rows) opts)]
    (into [(str "INSERT INTO " (table-fn (safe-name table name-fn))
                " (" params ")"
                " VALUES "
                ;; batch mode: one placeholder group (rows stay grouped as
                ;; batched parameters); otherwise one group per row with all
                ;; the row values flattened (via cat) into the vector:
                (if batch?
                  (str "(" places ")")
                  (str/join ", " (repeat (count rows) (str "(" places ")"))))
                (when-let [suffix (:suffix opts)]
                  (str " " suffix)))]
          (if batch? identity cat)
          rows)))
(comment
(as-cols [:aa :bb :cc] {})
(for-insert-multi :table [:aa :bb :cc] [[1 2 3] [4 5 6]]
{:table-fn str/upper-case :column-fn str/capitalize})
)
(defn for-order-col
  "Given a column name, or a pair of column name and direction
  (`:asc` / `:desc`), return the sub-clause for addition to `ORDER BY`.

  Throws `IllegalArgumentException` for any other shape of input."
  [col opts]
  (let [entity-fn (:column-fn opts identity)
        name-fn (:name-fn opts name)]
    (cond (keyword? col)
          ;; bare column name, default direction:
          (entity-fn (safe-name col name-fn))
          (and (vector? col) (= 2 (count col)) (keyword? (first col)))
          ;; [:col :asc] or [:col :desc]:
          (str (entity-fn (safe-name (first col) name-fn))
               " "
               (or (get {:asc "ASC" :desc "DESC"} (second col))
                   (throw (IllegalArgumentException.
                           (str ":order-by " col
                                " expected :asc or :desc")))))
          :else
          (throw (IllegalArgumentException.
                  (str ":order-by expected keyword or keyword pair,"
                       " found: " col))))))
(defn for-order
  "Given an `:order-by` vector, return an `ORDER BY` clause.

  Each element of the vector is rendered by `for-order-col`."
  [order-by opts]
  (when-not (vector? order-by)
    (throw (IllegalArgumentException. ":order-by must be a vector")))
  (assert (seq order-by) ":order-by may not be empty")
  (->> order-by
       (map #(for-order-col % opts))
       (str/join ", ")
       (str "ORDER BY ")))
(defn for-query
  "Given a table name and either a hash map of column names and values or a
  vector of SQL (where clause) and its parameters, return a vector of the
  full `SELECT` SQL string and its parameters.

  Applies any `:table-fn` / `:column-fn` supplied in the options.

  Handles pagination options (`:top`, `:limit` / `:offset`, or `:offset` /
  `:fetch`) for SQL Server, MySQL / SQLite, ANSI SQL respectively.

  By default, this selects all columns, but if the `:columns` option is
  present the select will only be those columns.

  If `:suffix` is provided in `opts`, that string is appended to the
  `SELECT ...` statement."
  [table where-params opts]
  (let [entity-fn (:table-fn opts identity)
        name-fn (:name-fn opts name)
        ;; three cases: a map of columns/values, :all (no WHERE clause),
        ;; or a [sql & params] fragment to which WHERE is prepended:
        where-params (cond (map? where-params)
                           (by-keys where-params :where opts)
                           (= :all where-params)
                           [nil]
                           :else
                           (into [(str "WHERE " (first where-params))]
                                 (rest where-params)))
        ;; pagination parameters are spliced around the WHERE parameters
        ;; to match their position in the SQL: :top (SQL Server) precedes
        ;; them, :limit / :offset / :fetch follow them, in that order:
        where-params (cond-> (if (:top opts)
                               (into [(first where-params)]
                                     (cons (:top opts) (rest where-params)))
                               where-params)
                       (:limit opts) (conj (:limit opts))
                       (:offset opts) (conj (:offset opts))
                       (:fetch opts) (conj (:fetch opts)))]
    (into [(str "SELECT "
                (when (:top opts)
                  "TOP ? ")
                (if-let [cols (seq (:columns opts))]
                  (as-cols cols opts)
                  "*")
                " FROM " (entity-fn (safe-name table name-fn))
                (when-let [clause (first where-params)]
                  (str " " clause))
                (when-let [order-by (:order-by opts)]
                  (str " " (for-order order-by opts)))
                (when (:limit opts)
                  " LIMIT ?")
                (when (:offset opts)
                  ;; MySQL / SQLite style when paired with LIMIT,
                  ;; ANSI OFFSET ? ROWS otherwise:
                  (if (:limit opts)
                    " OFFSET ?"
                    " OFFSET ? ROWS"))
                (when (:fetch opts)
                  " FETCH NEXT ? ROWS ONLY")
                (when-let [suffix (:suffix opts)]
                  (str " " suffix)))]
          (rest where-params))))
(defn for-update
  "Given a table name, a hash map of column names and the values to set
  them to, and either a hash map of column names and values or a vector of
  SQL (where clause) and its parameters, return a vector of the full
  `UPDATE` SQL string and its parameters.

  Applies any `:table-fn` / `:column-fn` supplied in the options.

  If `:suffix` is provided in `opts`, that string is appended to the
  `UPDATE ...` statement."
  [table key-map where-params opts]
  (let [entity-fn (:table-fn opts identity)
        name-fn (:name-fn opts name)
        set-params (by-keys key-map :set opts)
        where-params (if (map? where-params)
                       (by-keys where-params :where opts)
                       (into [(str "WHERE " (first where-params))]
                             (rest where-params)))]
    ;; SET parameters precede WHERE parameters, matching their order of
    ;; appearance in the generated SQL:
    (-> [(str "UPDATE " (entity-fn (safe-name table name-fn))
              " " (first set-params)
              " " (first where-params)
              (when-let [suffix (:suffix opts)]
                (str " " suffix)))]
        (into (rest set-params))
        (into (rest where-params)))))
next-jdbc-1.3.955/src/next/jdbc/sql_logging.clj 0000664 0000000 0000000 00000005012 14700603111 0021211 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2021-2023 Sean Corfield, all rights reserved
(ns ^:no-doc next.jdbc.sql-logging
"Implementation of sql-logging logic."
(:require [next.jdbc.protocols :as p]))
(set! *warn-on-reflection* true)
;; wraps a connectable along with a SQL logging function, an optional
;; result logging function, and default options; the next.jdbc protocols
;; are extended below so this can stand in for the wrapped connectable:
(defrecord SQLLogging [connectable sql-logger result-logger options])
(extend-protocol p/Sourceable
  SQLLogging
  ;; delegate straight to the wrapped connectable:
  (get-datasource [this]
    (p/get-datasource (:connectable this))))
(extend-protocol p/Connectable
  SQLLogging
  ;; delegate to the wrapped connectable with this wrapper's default
  ;; options merged in (per-call opts take precedence):
  (get-connection [this opts]
    (p/get-connection (:connectable this)
                      (merge (:options this) opts))))
(defn- result-logger-helper
  "Invoke the `:result-logger` (if any) with the invoking symbol, the
  state returned by the `:sql-logger`, and the result (or exception).

  `result` is the first parameter so this composes with `doto`."
  [result this sym state]
  (when-let [logger (:result-logger this)]
    (logger sym state result)))
(extend-protocol p/Executable
  SQLLogging
  (-execute [this sql-params opts]
    ;; no result-logger call possible (the result is not realized here),
    ;; so only the SQL itself is logged:
    ((:sql-logger this) 'next.jdbc/plan sql-params)
    (p/-execute (:connectable this) sql-params
                (merge (:options this) opts)))
  (-execute-one [this sql-params opts]
    ;; whatever the sql-logger returns is threaded through to the
    ;; result-logger as `state` (e.g., usable for timing):
    (let [state ((:sql-logger this) 'next.jdbc/execute-one! sql-params)]
      (try
        (doto (p/-execute-one (:connectable this) sql-params
                              (merge (:options this) opts))
          (result-logger-helper this 'next.jdbc/execute-one! state))
        (catch Throwable t
          ;; log the exception as the 'result', then rethrow unchanged:
          (result-logger-helper t this 'next.jdbc/execute-one! state)
          (throw t)))))
  (-execute-all [this sql-params opts]
    (let [state ((:sql-logger this) 'next.jdbc/execute! sql-params)]
      (try
        (doto (p/-execute-all (:connectable this) sql-params
                              (merge (:options this) opts))
          (result-logger-helper this 'next.jdbc/execute! state))
        (catch Throwable t
          (result-logger-helper t this 'next.jdbc/execute! state)
          (throw t))))))
(extend-protocol p/Preparable
  SQLLogging
  (prepare [this sql-params opts]
    ;; no result-logger call possible:
    ((:sql-logger this) 'next.jdbc/prepare sql-params)
    (p/prepare (:connectable this) sql-params
               (merge (:options this) opts))))
(extend-protocol p/Transactable
  SQLLogging
  ;; the transaction itself is not logged; delegate to the wrapped
  ;; connectable with this wrapper's default options merged in:
  (-transact [this body-fn opts]
    (p/-transact (:connectable this) body-fn
                 (merge (:options this) opts))))
next-jdbc-1.3.955/src/next/jdbc/transaction.clj 0000664 0000000 0000000 00000014163 14700603111 0021240 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2018-2021 Sean Corfield, all rights reserved
(ns next.jdbc.transaction
"Implementation of SQL transaction logic.
In general, you cannot nest transactions. `clojure.java.jdbc` would
ignore any attempt to create a nested transaction, even tho' some
databases do support it. `next.jdbc` allows you to call `with-transaction`
even while you are inside an active transaction, but the behavior may
vary across databases and the commit or rollback boundaries may not be
what you expect. In order to avoid two transactions constructed on the
same connection from interfering with each other, `next.jdbc` locks on
the `Connection` object (this prevents concurrent transactions on separate
threads from interfering but will cause deadlock on a single thread --
so beware).
Consequently, this namespace exposes a dynamic variable, `*nested-tx*`,
which can be used to vary the behavior when an attempt is made to start
a transaction when you are already inside a transaction."
(:require [next.jdbc.protocols :as p])
(:import (java.sql Connection)))
(set! *warn-on-reflection* true)
;; see the ns docstring for the full discussion of nested transactions:
(defonce ^:dynamic
  ^{:doc "Controls the behavior when a nested transaction is attempted.

  Possible values are:

  * `:allow` -- the default: assumes you know what you are doing!
  * `:ignore` -- the same behavior as `clojure.java.jdbc`: the nested
    transaction is simply ignored and any SQL operations inside it are
    executed in the context of the outer transaction.
  * `:prohibit` -- any attempt to create a nested transaction will throw
    an exception: this is the safest but most restrictive approach so
    that you can make sure you don't accidentally have any attempts
    to create nested transactions (since that might be a bug in your code)."}
  *nested-tx*
  :allow)
;; bound to true for the dynamic extent of a transaction so that
;; nested -transact calls can be detected on the same thread:
(defonce ^:private ^:dynamic ^{:doc "Used to detect nested transactions."}
  *active-tx* false)
(def ^:private isolation-levels
  "Transaction isolation levels.

  Maps the `:isolation` option keywords to `java.sql.Connection` constants."
  {:none Connection/TRANSACTION_NONE
   :read-committed Connection/TRANSACTION_READ_COMMITTED
   :read-uncommitted Connection/TRANSACTION_READ_UNCOMMITTED
   :repeatable-read Connection/TRANSACTION_REPEATABLE_READ
   :serializable Connection/TRANSACTION_SERIALIZABLE})
(defn- transact*
  "Run the given function inside a transaction created on the given connection.

  Isolation level options can be provided, as well as marking the transaction
  as read-only and/or rollback-only (so it will automatically rollback
  instead of committing any changes).

  The connection's prior auto-commit, isolation, and read-only settings are
  restored in the `finally` (except auto-commit when a rollback failed --
  see issue #80)."
  [^Connection con f opts]
  (let [isolation (:isolation opts)
        read-only (:read-only opts)
        rollback-only (:rollback-only opts)
        ;; capture the current settings so they can be restored later:
        old-autocommit (.getAutoCommit con)
        old-isolation (.getTransactionIsolation con)
        old-readonly (.isReadOnly con)
        restore-ac? (volatile! true)]
    (io!
     (when isolation
       (.setTransactionIsolation con (isolation isolation-levels)))
     (when read-only
       (.setReadOnly con true))
     (.setAutoCommit con false)
     (try
       (let [result (f con)]
         (if rollback-only
           (do
             ;; per #80: if the rollback operation fails, we do not
             ;; want to try to restore auto-commit...
             (vreset! restore-ac? false)
             (.rollback con)
             (vreset! restore-ac? true))
           (.commit con))
         result)
       (catch Throwable t
         (try
           ;; per #80: if the rollback operation fails, we do not
           ;; want to try to restore auto-commit...
           (vreset! restore-ac? false)
           (.rollback con)
           (vreset! restore-ac? true)
           (catch Throwable rb
             ;; combine both exceptions
             (throw (ex-info (str "Rollback failed handling \""
                                  (.getMessage t)
                                  "\"")
                             {:rollback rb
                              :handling t}))))
         (throw t))
       (finally ; tear down
         ;; the following can throw SQLExceptions but we do not
         ;; want those to replace any exception currently being
         ;; handled -- and if the connection got closed, we just
         ;; want to ignore exceptions here anyway
         (when @restore-ac?
           (try ; only restore auto-commit if rollback did not fail
             (.setAutoCommit con old-autocommit)
             (catch Exception _)))
         (when isolation
           (try
             (.setTransactionIsolation con old-isolation)
             (catch Exception _)))
         (when read-only
           (try
             (.setReadOnly con old-readonly)
             (catch Exception _))))))))
(extend-protocol p/Transactable
  java.sql.Connection
  (-transact [this body-fn opts]
    (cond
      (and (not *active-tx*) (= :ignore *nested-tx*))
      ;; #245 do not lock when in c.j.j compatibility mode:
      (binding [*active-tx* true]
        (transact* this body-fn opts))
      (or (not *active-tx*) (= :allow *nested-tx*))
      ;; lock so transactions on the same Connection from other threads
      ;; cannot interleave -- see the ns docstring for the single-thread
      ;; deadlock risk this implies:
      (locking this
        (binding [*active-tx* true]
          (transact* this body-fn opts)))
      (= :ignore *nested-tx*)
      ;; nested + :ignore -- run the body inside the outer transaction:
      (body-fn this)
      (= :prohibit *nested-tx*)
      (throw (IllegalStateException. "Nested transactions are prohibited"))
      :else
      (throw (IllegalArgumentException.
              (str "*nested-tx* ("
                   *nested-tx*
                   ") was not :allow, :ignore, or :prohibit")))))
  javax.sql.DataSource
  (-transact [this body-fn opts]
    (cond (or (not *active-tx*) (= :allow *nested-tx*))
          (binding [*active-tx* true]
            (with-open [con (p/get-connection this opts)]
              (transact* con body-fn opts)))
          (= :ignore *nested-tx*)
          ;; nested + :ignore -- no new transaction, just a connection:
          (with-open [con (p/get-connection this opts)]
            (body-fn con))
          (= :prohibit *nested-tx*)
          (throw (IllegalStateException. "Nested transactions are prohibited"))
          :else
          (throw (IllegalArgumentException.
                  (str "*nested-tx* ("
                       *nested-tx*
                       ") was not :allow, :ignore, or :prohibit")))))
  Object
  ;; anything else is treated as a db-spec from which to get a datasource:
  (-transact [this body-fn opts]
    (p/-transact (p/get-datasource this) body-fn opts)))
next-jdbc-1.3.955/src/next/jdbc/types.clj 0000664 0000000 0000000 00000002737 14700603111 0020063 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2018-2024 Sean Corfield, all rights reserved
(ns next.jdbc.types
"Provides convenience functions for wrapping values you pass into SQL
operations that have per-instance implementations of `SettableParameter`
so that `.setObject()` is called with the appropriate `java.sql.Types` value."
(:require [clojure.string :as str]
[next.jdbc.prepare])
(:import (java.lang.reflect Field Modifier)
(java.sql PreparedStatement)))
(set! *warn-on-reflection* true)
(defmacro ^:private all-types
  ;; generates one `as-xyz` wrapper function for every static field of
  ;; `java.sql.Types` (found by reflection at macro-expansion time),
  ;; e.g., VARCHAR -> as-varchar, LONGVARBINARY -> as-longvarbinary:
  []
  (let [names
        (into []
              (comp (filter #(Modifier/isStatic (.getModifiers ^Field %)))
                    (map #(.getName ^Field %)))
              (.getDeclaredFields java.sql.Types))]
    `(do
       ~@(for [n names]
           ;; build the as-xyz symbol from the field name:
           (let [as-n (symbol (str "as-"
                                   (-> n
                                       (str/lower-case)
                                       (str/replace "_" "-"))))]
             `(defn ~as-n
                ~(str "Wrap a Clojure value in a thunk with metadata to implement `set-parameter`
  so that `.setObject()` is called with the `java.sql.Types/" n "` SQL type.")
                [~'obj]
                (with-meta (constantly ~'obj)
                  {'next.jdbc.prepare/set-parameter
                   (fn [vf# ^PreparedStatement s# ^long i#]
                     (.setObject s# i# (vf#) ~(symbol "java.sql.Types" n)))})))))))
;; expand the macro to define all the as-xyz functions in this namespace:
(all-types)
(comment
(macroexpand '(all-types)))
next-jdbc-1.3.955/test/ 0000775 0000000 0000000 00000000000 14700603111 0014524 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/test/log4j2-info.properties 0000664 0000000 0000000 00000001567 14700603111 0020705 0 ustar 00root root 0000000 0000000 # Sean's normal mode, shows INFO and above, with highlighting:
rootLogger.level = info
rootLogger.appenderRef.stdout.ref = STDOUT
appender.console.type = Console
appender.console.name = STDOUT
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%c] %highlight{%m}%n
# I do not care about any of c3p0's INFO messages...
logger.c3p0.name = com.mchange.v2
logger.c3p0.level = warn
logger.c3p0.appenderRef.stdout.ref = STDOUT
# ...nor HikariCP...
logger.hikari.name = com.zaxxer.hikari
logger.hikari.level = warn
logger.hikari.appenderRef.stdout.ref = STDOUT
# ...nor embedded HSQLDB...
logger.hsqldb.name = hsqldb.db
logger.hsqldb.level = warn
logger.hsqldb.appenderRef.stdout.ref = STDOUT
# ...nor embedded PostgreSQL...
logger.postgres.name = io.zonky.test.db.postgres.embedded
logger.postgres.level = warn
logger.postgres.appenderRef.stdout.ref = STDOUT
next-jdbc-1.3.955/test/next/ 0000775 0000000 0000000 00000000000 14700603111 0015502 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/test/next/jdbc/ 0000775 0000000 0000000 00000000000 14700603111 0016404 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/test/next/jdbc/connection_string_test.clj 0000664 0000000 0000000 00000003231 14700603111 0023661 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2021 Sean Corfield, all rights reserved
(ns next.jdbc.connection-string-test
"Tests for the main hash map spec to JDBC URL logic and the get-datasource
and get-connection protocol implementations.
At some point, the datasource/connection tests should probably be extended
to accept EDN specs from an external source (environment variables?)."
(:require [clojure.string :as str]
[clojure.test :refer [deftest is testing use-fixtures]]
[next.jdbc.connection :as c]
[next.jdbc.protocols :as p]
[next.jdbc.specs :as specs]
[next.jdbc.test-fixtures :refer [with-test-db db]]))
(set! *warn-on-reflection* true)
(use-fixtures :once with-test-db)
(specs/instrument)
(deftest test-uri-strings
  ;; round-trips the test db-spec through a JDBC URL string (with the
  ;; jdbc: prefix stripped and credentials moved into the URI) and
  ;; verifies a usable datasource results:
  (testing "datasource via String"
    (let [db-spec (db)
          db-spec (if (= "embedded-postgres" (:dbtype db-spec))
                    (assoc db-spec :dbtype "postgresql")
                    db-spec)
          [url etc] (#'c/spec->url+etc db-spec)
          {:keys [user password]} etc
          etc (dissoc etc :user :password)
          uri (-> url
                  ;; strip jdbc: prefix for fun
                  (str/replace #"^jdbc:" "")
                  (str/replace #";" "?") ; for SQL Server tests
                  (str/replace #":sqlserver" "") ; for SQL Server tests
                  (cond-> (and user password)
                          (str/replace #"://" (str "://" user ":" password "@"))))
          ds (p/get-datasource (assoc etc :jdbcUrl uri))]
      ;; only attempt a real connection when we have credentials:
      (when (and user password)
        (with-open [con (p/get-connection ds {})]
          (is (instance? java.sql.Connection con)))))))
next-jdbc-1.3.955/test/next/jdbc/connection_test.clj 0000664 0000000 0000000 00000024331 14700603111 0022277 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2021 Sean Corfield, all rights reserved
(ns next.jdbc.connection-test
"Tests for the main hash map spec to JDBC URL logic and the get-datasource
and get-connection protocol implementations.
At some point, the datasource/connection tests should probably be extended
to accept EDN specs from an external source (environment variables?)."
(:require [clojure.string :as str]
[clojure.test :refer [deftest is testing]]
[next.jdbc.connection :as c]
[next.jdbc.protocols :as p])
(:import (com.zaxxer.hikari HikariDataSource)
(com.mchange.v2.c3p0 ComboPooledDataSource PooledDataSource)))
(set! *warn-on-reflection* true)
(def ^:private db-name "clojure_test")
(deftest test-aliases-and-defaults
  ;; each dbtype alias must produce the same URL+etc as its canonical
  ;; name (modulo documented URL differences for the oracle variants),
  ;; and omitting :port must match specifying the default port:
  (testing "aliases"
    (is (= (#'c/spec->url+etc {:dbtype "hsql" :dbname db-name})
           (#'c/spec->url+etc {:dbtype "hsqldb" :dbname db-name})))
    (is (= (#'c/spec->url+etc {:dbtype "jtds" :dbname db-name})
           (#'c/spec->url+etc {:dbtype "jtds:sqlserver" :dbname db-name})))
    (is (= (#'c/spec->url+etc {:dbtype "mssql" :dbname db-name})
           (#'c/spec->url+etc {:dbtype "sqlserver" :dbname db-name})))
    (is (= (#'c/spec->url+etc {:dbtype "oracle" :dbname db-name})
           (#'c/spec->url+etc {:dbtype "oracle:thin" :dbname db-name})))
    (is (= (#'c/spec->url+etc {:dbtype "oracle:sid" :dbname db-name})
           (-> (#'c/spec->url+etc {:dbtype "oracle:thin" :dbname db-name})
               ;; oracle:sid uses : before DB name, not /
               (update 0 str/replace (re-pattern (str "/" db-name)) (str ":" db-name)))))
    (is (= (#'c/spec->url+etc {:dbtype "oracle:oci" :dbname db-name})
           (-> (#'c/spec->url+etc {:dbtype "oracle:thin" :dbname db-name})
               ;; oracle:oci and oracle:thin only differ in the protocol
               (update 0 str/replace #":thin" ":oci"))))
    (is (= (#'c/spec->url+etc {:dbtype "postgres" :dbname db-name})
           (#'c/spec->url+etc {:dbtype "postgresql" :dbname db-name}))))
  (testing "default ports"
    (is (= (#'c/spec->url+etc {:dbtype "jtds:sqlserver" :dbname db-name})
           (#'c/spec->url+etc {:dbtype "jtds:sqlserver" :dbname db-name :port 1433})))
    (is (= (#'c/spec->url+etc {:dbtype "mysql" :dbname db-name})
           (#'c/spec->url+etc {:dbtype "mysql" :dbname db-name :port 3306})))
    (is (= (#'c/spec->url+etc {:dbtype "oracle:oci" :dbname db-name})
           (#'c/spec->url+etc {:dbtype "oracle:oci" :dbname db-name :port 1521})))
    (is (= (#'c/spec->url+etc {:dbtype "oracle:sid" :dbname db-name})
           (#'c/spec->url+etc {:dbtype "oracle:sid" :dbname db-name :port 1521})))
    (is (= (#'c/spec->url+etc {:dbtype "oracle:thin" :dbname db-name})
           (#'c/spec->url+etc {:dbtype "oracle:thin" :dbname db-name :port 1521})))
    (is (= (#'c/spec->url+etc {:dbtype "postgresql" :dbname db-name})
           (#'c/spec->url+etc {:dbtype "postgresql" :dbname db-name :port 5432})))
    (is (= (#'c/spec->url+etc {:dbtype "sqlserver" :dbname db-name})
           (#'c/spec->url+etc {:dbtype "sqlserver" :dbname db-name :port 1433})))))
(deftest custom-dbtypes
  ;; an unknown dbtype with an explicit :classname should build a URL
  ;; honoring :host :none, custom :dbname-separator, :host-prefix, and
  ;; :port :none:
  (is (= ["jdbc:acme:my-db" {} nil]
         (#'c/spec->url+etc {:dbtype "acme" :classname "java.lang.String"
                             :dbname "my-db" :host :none})))
  (is (= ["jdbc:acme://127.0.0.1/my-db" {} nil]
         (#'c/spec->url+etc {:dbtype "acme" :classname "java.lang.String"
                             :dbname "my-db"})))
  (is (= ["jdbc:acme://12.34.56.70:1234/my-db" {} nil]
         (#'c/spec->url+etc {:dbtype "acme" :classname "java.lang.String"
                             :dbname "my-db" :host "12.34.56.70" :port 1234})))
  (is (= ["jdbc:acme:dsn=my-db" {} nil]
         (#'c/spec->url+etc {:dbtype "acme" :classname "java.lang.String"
                             :dbname "my-db" :host :none
                             :dbname-separator ":dsn="})))
  (is (= ["jdbc:acme:(*)127.0.0.1/my-db" {} nil]
         (#'c/spec->url+etc {:dbtype "acme" :classname "java.lang.String"
                             :dbname "my-db"
                             :host-prefix "(*)"})))
  (is (= ["jdbc:acme:(*)12.34.56.70:1234/my-db" {} nil]
         (#'c/spec->url+etc {:dbtype "acme" :classname "java.lang.String"
                             :dbname "my-db" :host "12.34.56.70" :port 1234
                             :host-prefix "(*)"})))
  (is (= ["jdbc:acme:(*)12.34.56.70/my-db" {} nil]
         (#'c/spec->url+etc {:dbtype "acme" :classname "java.lang.String"
                             :dbname "my-db" :host "12.34.56.70" :port :none
                             :host-prefix "(*)"}))))
(deftest jdbc-url-tests
  ;; the public jdbc-url builder should mirror spec->url+etc behavior
  ;; and fold extra db-spec keys into URL properties:
  (testing "basic URLs work"
    (is (= "jdbc:acme:my-db"
           (c/jdbc-url {:dbtype "acme" :classname "java.lang.String"
                        :dbname "my-db" :host :none})))
    (is (= "jdbc:acme://127.0.0.1/my-db"
           (c/jdbc-url {:dbtype "acme" :classname "java.lang.String"
                        :dbname "my-db"})))
    (is (= "jdbc:acme://12.34.56.70:1234/my-db"
           (c/jdbc-url {:dbtype "acme" :classname "java.lang.String"
                        :dbname "my-db" :host "12.34.56.70" :port 1234})))
    (is (= "jdbc:acme:dsn=my-db"
           (c/jdbc-url {:dbtype "acme" :classname "java.lang.String"
                        :dbname "my-db" :host :none
                        :dbname-separator ":dsn="})))
    (is (= "jdbc:acme:(*)127.0.0.1/my-db"
           (c/jdbc-url {:dbtype "acme" :classname "java.lang.String"
                        :dbname "my-db"
                        :host-prefix "(*)"})))
    (is (= "jdbc:acme:(*)12.34.56.70:1234/my-db"
           (c/jdbc-url {:dbtype "acme" :classname "java.lang.String"
                        :dbname "my-db" :host "12.34.56.70" :port 1234
                        :host-prefix "(*)"}))))
  (testing "URLs with properties work"
    (is (= "jdbc:acme:my-db?useSSL=true"
           (c/jdbc-url {:dbtype "acme" :classname "java.lang.String"
                        :dbname "my-db" :host :none
                        :useSSL true})))
    ;; property order is not guaranteed, so accept either ordering:
    (is (boolean (#{"jdbc:acme:my-db?useSSL=true&user=dba"
                    "jdbc:acme:my-db?user=dba&useSSL=true"}
                  (c/jdbc-url {:dbtype "acme" :classname "java.lang.String"
                               :dbname "my-db" :host :none
                               :useSSL true :user "dba"}))))
    (is (= "jdbc:jtds:sqlserver:my-db;useSSL=true"
           (c/jdbc-url {:dbtype "jtds"
                        :dbname "my-db" :host :none
                        :useSSL true})))
    (is (boolean (#{"jdbc:jtds:sqlserver:my-db;useSSL=true;user=dba"
                    "jdbc:jtds:sqlserver:my-db;user=dba;useSSL=true"}
                  (c/jdbc-url {:dbtype "jtds"
                               :dbname "my-db" :host :none
                               :useSSL true :user "dba"}))))))
;; these are the 'local' databases that we can always test against
(def test-db-type ["derby" "h2" "h2:mem" "hsqldb" "sqlite"])
;; one db-spec per local db type; Derby needs :create true so the
;; database is created on first connection:
(def test-dbs
  (for [db test-db-type]
    (cond-> {:dbtype db :dbname (str db-name "_" (str/replace db #":" "_"))}
      (= "derby" db)
      (assoc :create true))))
(deftest test-sourceable-via-metadata
  ;; any value can act as a datasource source if it carries a
  ;; `get-datasource` implementation in its metadata:
  (doseq [db test-dbs]
    (let [ds (p/get-datasource
              ^{`p/get-datasource (fn [v] (p/get-datasource (first v)))} [db])]
      (is (instance? javax.sql.DataSource ds)))))
(deftest test-get-connection
  ;; for every local database, verify all the ways a datasource and a
  ;; connection can be obtained (db-spec map, URL string, :jdbcUrl map,
  ;; HikariCP pool, c3p0 pool, and map directly as connectable):
  (doseq [db test-dbs]
    (println 'test-get-connection (:dbtype db))
    (testing "datasource via Associative"
      (let [ds (p/get-datasource db)]
        (is (instance? javax.sql.DataSource ds))
        (is (str/index-of (pr-str ds) (str "jdbc:" (:dbtype db))))
        ;; checks get-datasource on a DataSource is identity
        (is (identical? ds (p/get-datasource ds)))
        (with-open [con (p/get-connection ds {})]
          (is (instance? java.sql.Connection con)))))
    (testing "datasource via String"
      (let [[url _] (#'c/spec->url+etc db)
            ds (p/get-datasource url)]
        (is (instance? javax.sql.DataSource ds))
        (is (str/index-of (pr-str ds) url))
        (.setLoginTimeout ds 0)
        (is (= 0 (.getLoginTimeout ds)))
        (with-open [con (p/get-connection ds {})]
          (is (instance? java.sql.Connection con)))))
    (testing "datasource via jdbcUrl"
      (let [[url etc] (#'c/spec->url+etc db)
            ds (p/get-datasource (assoc etc :jdbcUrl url))]
        ;; only Derby keeps an extra (:create) property in etc:
        (if (= "derby" (:dbtype db))
          (is (= {:create true} etc))
          (is (= {} etc)))
        (is (instance? javax.sql.DataSource ds))
        (is (str/index-of (pr-str ds) (str "jdbc:" (:dbtype db))))
        ;; checks get-datasource on a DataSource is identity
        (is (identical? ds (p/get-datasource ds)))
        (.setLoginTimeout ds 1)
        (is (= 1 (.getLoginTimeout ds)))
        (with-open [con (p/get-connection ds {})]
          (is (instance? java.sql.Connection con)))))
    (testing "datasource via HikariCP"
      ;; the type hint is only needed because we want to call .close
      (with-open [^HikariDataSource ds (c/->pool HikariDataSource db)]
        (is (instance? javax.sql.DataSource ds))
        ;; checks get-datasource on a DataSource is identity
        (is (identical? ds (p/get-datasource ds)))
        (with-open [con (p/get-connection ds {})]
          (is (instance? java.sql.Connection con)))))
    (testing "datasource via c3p0"
      ;; the type hint is only needed because we want to call .close
      (with-open [^PooledDataSource ds (c/->pool ComboPooledDataSource db)]
        (is (instance? javax.sql.DataSource ds))
        ;; checks get-datasource on a DataSource is identity
        (is (identical? ds (p/get-datasource ds)))
        (with-open [con (p/get-connection ds {})]
          (is (instance? java.sql.Connection con)))))
    (testing "connection via map (Object)"
      (with-open [con (p/get-connection db {})]
        (is (instance? java.sql.Connection con))))))
(deftest issue-243-uri->db-spec
  ;; both the user:password@host form and the ?user=..&password=..
  ;; query-parameter form must produce the same db-spec:
  (is (= {:dbtype "mysql" :dbname "mydb"
          :host "myserver" :port 1234
          :user "foo" :password "bar"}
         (c/uri->db-spec "mysql://foo:bar@myserver:1234/mydb")))
  (is (= {:dbtype "mysql" :dbname "mydb"
          :host "myserver" :port 1234
          :user "foo" :password "bar"}
         (c/uri->db-spec "jdbc:mysql://myserver:1234/mydb?user=foo&password=bar"))))
next-jdbc-1.3.955/test/next/jdbc/datafy_test.clj 0000664 0000000 0000000 00000013037 14700603111 0021411 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2020-2021 Sean Corfield, all rights reserved
(ns next.jdbc.datafy-test
"Tests for the datafy extensions over JDBC types."
(:require [clojure.datafy :as d]
[clojure.set :as set]
[clojure.test :refer [deftest is testing use-fixtures]]
[next.jdbc :as jdbc]
[next.jdbc.datafy]
[next.jdbc.result-set :as rs]
[next.jdbc.specs :as specs]
[next.jdbc.test-fixtures
:refer [with-test-db db ds
derby? jtds? mysql? postgres? sqlite?]]))
(set! *warn-on-reflection* true)
(use-fixtures :once with-test-db)
(specs/instrument)
(def ^:private basic-connection-keys
  "Generic JDBC Connection fields expected in the datafied representation,
  regardless of database (per-database exceptions are handled in the tests)."
  #{:autoCommit :catalog :clientInfo :holdability :metaData
    :networkTimeout :schema :transactionIsolation :typeMap :warnings
    ;; boolean properties
    :closed :readOnly
    ;; configured to be added as if by clojure.core/bean
    :class})
(deftest connection-datafy-tests
  (testing "connection datafication"
    (with-open [con (jdbc/get-connection (ds))]
      ;; some drivers throw on certain accessors, which shows up as a
      ;; namespaced .../exception key instead of the plain key:
      (let [reference-keys (cond-> basic-connection-keys
                             (derby?) (-> (disj :networkTimeout)
                                          (conj :networkTimeout/exception))
                             (jtds?) (-> (disj :clientInfo :networkTimeout :schema)
                                         (conj :clientInfo/exception
                                               :networkTimeout/exception
                                               :schema/exception)))
            data (set (keys (d/datafy con)))]
        ;; print (rather than fail on) any extra keys a driver exposes:
        (when-let [diff (seq (set/difference data reference-keys))]
          (println (format "%6s :%-10s %s"
                           (:dbtype (db)) "connection" (str (sort diff)))))
        (is (= reference-keys
               (set/intersection reference-keys data)))))))
(def ^:private basic-database-metadata-keys
  "Generic JDBC DatabaseMetaData fields expected in the datafied
  representation, regardless of database."
  #{:JDBCMajorVersion :JDBCMinorVersion :SQLKeywords :SQLStateType :URL
    :catalogSeparator :catalogTerm :catalogs
    :clientInfoProperties :connection
    :databaseMajorVersion :databaseMinorVersion
    :databaseProductName :databaseProductVersion
    :defaultTransactionIsolation
    :driverMajorVersion :driverMinorVersion :driverName :driverVersion
    :extraNameCharacters :identifierQuoteString
    :maxBinaryLiteralLength :maxCatalogNameLength :maxCharLiteralLength
    :maxColumnNameLength :maxColumnsInGroupBy :maxColumnsInIndex
    :maxColumnsInOrderBy :maxColumnsInSelect :maxColumnsInTable
    :maxConnections
    :maxCursorNameLength :maxIndexLength
    :maxProcedureNameLength :maxRowSize :maxSchemaNameLength
    :maxStatementLength :maxStatements :maxTableNameLength
    :maxTablesInSelect :maxUserNameLength :numericFunctions
    :procedureTerm :resultSetHoldability :rowIdLifetime
    :schemaTerm :schemas :searchStringEscape :stringFunctions
    :systemFunctions :tableTypes :timeDateFunctions
    :typeInfo :userName
    ;; boolean properties
    :catalogAtStart :readOnly
    ;; configured to be added as if by clojure.core/bean
    :class
    ;; added by next.jdbc.datafy if the datafication succeeds
    :all-tables})
(deftest database-metadata-datafy-tests
  (testing "database metadata datafication"
    (with-open [con (jdbc/get-connection (ds))]
      ;; some drivers throw on certain accessors, which shows up as a
      ;; namespaced .../exception key instead of the plain key:
      (let [reference-keys (cond-> basic-database-metadata-keys
                             (jtds?) (-> (disj :clientInfoProperties :rowIdLifetime)
                                         (conj :clientInfoProperties/exception
                                               :rowIdLifetime/exception))
                             (postgres?) (-> (disj :rowIdLifetime)
                                             (conj :rowIdLifetime/exception))
                             (sqlite?) (-> (disj :clientInfoProperties :rowIdLifetime)
                                           (conj :clientInfoProperties/exception
                                                 :rowIdLifetime/exception)))
            data (set (keys (d/datafy (.getMetaData con))))]
        ;; print (rather than fail on) any extra keys a driver exposes:
        (when-let [diff (seq (set/difference data reference-keys))]
          (println (format "%6s :%-10s %s"
                           (:dbtype (db)) "db-meta" (str (sort diff)))))
        (is (= reference-keys
               (set/intersection reference-keys data))))))
  (testing "nav to catalogs yields object"
    (with-open [con (jdbc/get-connection (ds))]
      (let [data (d/datafy (.getMetaData con))]
        (doseq [k (cond-> #{:catalogs :clientInfoProperties :schemas :tableTypes :typeInfo}
                    (jtds?) (disj :clientInfoProperties)
                    (sqlite?) (disj :clientInfoProperties))]
          ;; navigating to each key should yield a vector of row maps:
          (let [rs (d/nav data k nil)]
            (is (vector? rs))
            (is (every? map? rs))))))))
(deftest result-set-metadata-datafy-tests
(testing "result set metadata datafication"
(let [data (reduce (fn [_ row] (reduced (rs/metadata row)))
nil
(jdbc/plan (ds) [(str "SELECT * FROM "
(if (mysql?) "fruit" "FRUIT"))]))]
(is (vector? data))
(is (= 5 (count data)))
(is (every? map? data))
(is (every? :label data)))))
(comment
(def con (jdbc/get-connection (ds)))
(rs/datafiable-result-set (.getTables (.getMetaData con) nil nil nil nil) con {})
(def ps (jdbc/prepare con ["SELECT * FROM fruit WHERE grade > ?"]))
(require '[next.jdbc.prepare :as prep])
(prep/set-parameters ps [30])
(.execute ps)
(.getResultSet ps)
(.close ps)
(.close con))
next-jdbc-1.3.955/test/next/jdbc/date_time_test.clj 0000664 0000000 0000000 00000005354 14700603111 0022077 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2021 Sean Corfield, all rights reserved
(ns next.jdbc.date-time-test
  "Date/time parameter auto-conversion tests.
  These tests contain no assertions. Without requiring `next.jdbc.date-time`
  several of the `insert` operations would throw exceptions for some databases
  so the test here just checks those operations 'succeed'."
  (:require [clojure.test :refer [deftest is testing use-fixtures]]
            [next.jdbc :as jdbc]
            [next.jdbc.date-time] ; to extend SettableParameter to date/time
            [next.jdbc.test-fixtures :refer [with-test-db db ds
                                             mssql?]]
            [next.jdbc.specs :as specs])
  ;; NOTE(review): the ResultSet import appears unused in this namespace --
  ;; confirm before removing
  (:import (java.sql ResultSet)))
(set! *warn-on-reflection* true)
(use-fixtures :once with-test-db)
(specs/instrument)
;; Insert java.util.Date and java.time values into timestamp/datetime, time,
;; and date columns in turn; each insert would throw on some databases
;; without next.jdbc.date-time loaded, so completing without an exception is
;; the 'assertion' here.
(deftest issue-73
  ;; best-effort drop in case a previous run left the table behind
  (try
    (jdbc/execute-one! (ds) ["drop table fruit_time"])
    (catch Throwable _))
  ;; MS SQL Server has no `timestamp` type for this purpose; use `datetime`
  (jdbc/execute-one! (ds) [(str "create table fruit_time (id int not null, deadline "
                                (if (mssql?) "datetime" "timestamp")
                                " not null)")])
  (jdbc/execute-one! (ds) ["insert into fruit_time (id, deadline) values (?,?)" 1 (java.util.Date.)])
  (jdbc/execute-one! (ds) ["insert into fruit_time (id, deadline) values (?,?)" 2 (java.time.Instant/now)])
  (jdbc/execute-one! (ds) ["insert into fruit_time (id, deadline) values (?,?)" 3 (java.time.LocalDate/now)])
  (jdbc/execute-one! (ds) ["insert into fruit_time (id, deadline) values (?,?)" 4 (java.time.LocalDateTime/now)])
  ;; repeat the exercise with a `time` column
  (try
    (jdbc/execute-one! (ds) ["drop table fruit_time"])
    (catch Throwable _))
  (jdbc/execute-one! (ds) ["create table fruit_time (id int not null, deadline time not null)"])
  (jdbc/execute-one! (ds) ["insert into fruit_time (id, deadline) values (?,?)" 1 (java.util.Date.)])
  (jdbc/execute-one! (ds) ["insert into fruit_time (id, deadline) values (?,?)" 2 (java.time.Instant/now)])
  (jdbc/execute-one! (ds) ["insert into fruit_time (id, deadline) values (?,?)" 3 (java.time.LocalDate/now)])
  (jdbc/execute-one! (ds) ["insert into fruit_time (id, deadline) values (?,?)" 4 (java.time.LocalDateTime/now)])
  ;; and again with a `date` column
  (try
    (jdbc/execute-one! (ds) ["drop table fruit_time"])
    (catch Throwable _))
  (jdbc/execute-one! (ds) ["create table fruit_time (id int not null, deadline date not null)"])
  (jdbc/execute-one! (ds) ["insert into fruit_time (id, deadline) values (?,?)" 1 (java.util.Date.)])
  (jdbc/execute-one! (ds) ["insert into fruit_time (id, deadline) values (?,?)" 2 (java.time.Instant/now)])
  (jdbc/execute-one! (ds) ["insert into fruit_time (id, deadline) values (?,?)" 3 (java.time.LocalDate/now)])
  (jdbc/execute-one! (ds) ["insert into fruit_time (id, deadline) values (?,?)" 4 (java.time.LocalDateTime/now)]))
next-jdbc-1.3.955/test/next/jdbc/default_options_test.clj 0000664 0000000 0000000 00000000523 14700603111 0023334 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2020-2021 Sean Corfield, all rights reserved
(ns next.jdbc.default-options-test
  "Stub test namespace for default options. Nothing can really be tested
  at this level tho'..."
  (:require [clojure.test :refer [deftest is testing]]
            ;; :refer :all at least ensures the namespace loads and compiles
            [next.jdbc.default-options :refer :all]))
(set! *warn-on-reflection* true)
next-jdbc-1.3.955/test/next/jdbc/defer_test.clj 0000664 0000000 0000000 00000004600 14700603111 0021222 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2024 Sean Corfield, all rights reserved
(ns next.jdbc.defer-test
  "The idea behind the next.jdbc.defer namespace is to provide a
  way to defer the execution of a series of SQL statements until
  a later time, but still provide a way for inserted keys to be
  used in later SQL statements.
  The principle is to provide a core subset of the next.jdbc
  and next.jdbc.sql API that produces a data structure that
  describes a series of SQL operations to be performed, that
  are held in a dynamic var, and that can be executed at a
  later time, in a transaction."
  (:require [clojure.test :refer [deftest is testing use-fixtures]]
            [next.jdbc :as jdbc]
            [next.jdbc.defer :as sut]
            [next.jdbc.test-fixtures
             :refer [ds with-test-db]]))
(set! *warn-on-reflection* true)
(use-fixtures :once with-test-db)
(deftest basic-test
  (testing "data structures"
    ;; defer-ops only records the intended operation as data -- nothing is
    ;; executed against the database here
    (is (= [{:sql-p ["INSERT INTO foo (name) VALUES (?)" "Sean"]
             :key-fn :GENERATED_KEY
             :key :id
             :opts {:key-fn :GENERATED_KEY :key :id}}]
           @(sut/defer-ops
              #(sut/insert! :foo {:name "Sean"} {:key-fn :GENERATED_KEY :key :id})))))
  (testing "execution"
    ;; deref'ing the result of with-deferred runs the deferred operations in
    ;; a transaction and yields a map of :key -> generated key
    (let [effects (sut/with-deferred (ds)
                    (sut/insert! :fruit {:name "Mango"} {:key :test}))]
      (is (= {:test 1} @effects))
      (is (= 1 (count (jdbc/execute! (ds)
                                     ["select * from fruit where name = ?"
                                      "Mango"])))))
    ;; insert/update/delete compose; the net effect here is no new row
    (let [effects (sut/with-deferred (ds)
                    (sut/insert! :fruit {:name "Dragonfruit"} {:key :test})
                    (sut/update! :fruit {:cost 123} {:name "Dragonfruit"})
                    (sut/delete! :fruit {:name "Dragonfruit"}))]
      (is (= {:test 1} @effects))
      (is (= 0 (count (jdbc/execute! (ds)
                                     ["select * from fruit where name = ?"
                                      "Dragonfruit"])))))
    ;; a failing deferred op throws on deref and rolls back the transaction
    (let [effects (sut/with-deferred (ds)
                    (sut/insert! :fruit {:name "Grapefruit" :bad_column 0} {:key :test}))]
      (is (= :failed (try @effects
                          (catch Exception _ :failed))))
      (is (= 0 (count (jdbc/execute! (ds)
                                     ["select * from fruit where name = ?"
                                      "Grapefruit"])))))))
next-jdbc-1.3.955/test/next/jdbc/optional_test.clj 0000664 0000000 0000000 00000012760 14700603111 0021770 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2021 Sean Corfield, all rights reserved
(ns next.jdbc.optional-test
  "Test namespace for the optional builder functions."
  (:require [clojure.string :as str]
            [clojure.test :refer [deftest is testing use-fixtures]]
            [next.jdbc.optional :as opt]
            [next.jdbc.protocols :as p]
            [next.jdbc.test-fixtures :refer [with-test-db ds column
                                             default-options]])
  (:import (java.sql ResultSet ResultSetMetaData)))
(set! *warn-on-reflection* true)
(use-fixtures :once with-test-db)
;; Exercise each of the 'optional' map builders. The assertions show their
;; distinguishing behavior: columns whose value is SQL NULL (GRADE/COST/
;; APPEARANCE for the rows selected) are omitted from the built map rather
;; than included with a nil value.
(deftest test-map-row-builder
  (testing "default row builder"
    (let [row (p/-execute-one (ds)
                              ["select * from fruit where id = ?" 1]
                              (assoc (default-options)
                                     :builder-fn opt/as-maps))]
      (is (map? row))
      ;; NULL column is absent entirely, not present-with-nil
      (is (not (contains? row (column :FRUIT/GRADE))))
      (is (= 1 ((column :FRUIT/ID) row)))
      (is (= "Apple" ((column :FRUIT/NAME) row)))))
  (testing "unqualified row builder"
    (let [row (p/-execute-one (ds)
                              ["select * from fruit where id = ?" 2]
                              {:builder-fn opt/as-unqualified-maps})]
      (is (map? row))
      (is (not (contains? row (column :COST))))
      (is (= 2 ((column :ID) row)))
      (is (= "Banana" ((column :NAME) row)))))
  (testing "lower-case row builder"
    (let [row (p/-execute-one (ds)
                              ["select * from fruit where id = ?" 3]
                              (assoc (default-options)
                                     :builder-fn opt/as-lower-maps))]
      (is (map? row))
      (is (not (contains? row :fruit/appearance)))
      (is (= 3 (:fruit/id row)))
      (is (= "Peach" (:fruit/name row)))))
  (testing "unqualified lower-case row builder"
    (let [row (p/-execute-one (ds)
                              ["select * from fruit where id = ?" 4]
                              {:builder-fn opt/as-unqualified-lower-maps})]
      (is (map? row))
      (is (= 4 (:id row)))
      (is (= "Orange" (:name row)))))
  (testing "custom row builder"
    ;; as-modified-maps applies user-supplied :label-fn / :qualifier-fn to
    ;; column labels and table qualifiers respectively
    (let [row (p/-execute-one (ds)
                              ["select * from fruit where id = ?" 3]
                              (assoc (default-options)
                                     :builder-fn opt/as-modified-maps
                                     :label-fn str/lower-case
                                     :qualifier-fn identity))]
      (is (map? row))
      (is (not (contains? row (column :FRUIT/appearance))))
      (is (= 3 ((column :FRUIT/id) row)))
      (is (= "Peach" ((column :FRUIT/name) row))))))
(defn- default-column-reader
  "A column-reading function exhibiting the default behavior: fetch
  column `i` of the current row of `rs` via `ResultSet.getObject`.
  The result set metadata argument is accepted (to satisfy the
  adapter's calling convention) but ignored."
  [^ResultSet rs ^ResultSetMetaData _rsmeta ^Integer i]
  (.getObject rs i))
;; Same matrix of builders as test-map-row-builder above, but each builder
;; is wrapped via opt/as-maps-adapter with a custom column reader; the
;; observable behavior (NULL columns omitted, etc.) must be unchanged.
(deftest test-map-row-adapter
  (testing "default row builder"
    (let [row (p/-execute-one (ds)
                              ["select * from fruit where id = ?" 1]
                              (assoc (default-options)
                                     :builder-fn (opt/as-maps-adapter
                                                  opt/as-maps
                                                  default-column-reader)))]
      (is (map? row))
      (is (not (contains? row (column :FRUIT/GRADE))))
      (is (= 1 ((column :FRUIT/ID) row)))
      (is (= "Apple" ((column :FRUIT/NAME) row)))))
  (testing "unqualified row builder"
    (let [row (p/-execute-one (ds)
                              ["select * from fruit where id = ?" 2]
                              {:builder-fn (opt/as-maps-adapter
                                            opt/as-unqualified-maps
                                            default-column-reader)})]
      (is (map? row))
      (is (not (contains? row (column :COST))))
      (is (= 2 ((column :ID) row)))
      (is (= "Banana" ((column :NAME) row)))))
  (testing "lower-case row builder"
    (let [row (p/-execute-one (ds)
                              ["select * from fruit where id = ?" 3]
                              (assoc (default-options)
                                     :builder-fn (opt/as-maps-adapter
                                                  opt/as-lower-maps
                                                  default-column-reader)))]
      (is (map? row))
      (is (not (contains? row :fruit/appearance)))
      (is (= 3 (:fruit/id row)))
      (is (= "Peach" (:fruit/name row)))))
  (testing "unqualified lower-case row builder"
    (let [row (p/-execute-one (ds)
                              ["select * from fruit where id = ?" 4]
                              {:builder-fn (opt/as-maps-adapter
                                            opt/as-unqualified-lower-maps
                                            default-column-reader)})]
      (is (map? row))
      (is (= 4 (:id row)))
      (is (= "Orange" (:name row)))))
  (testing "custom row builder"
    (let [row (p/-execute-one (ds)
                              ["select * from fruit where id = ?" 3]
                              (assoc (default-options)
                                     :builder-fn (opt/as-maps-adapter
                                                  opt/as-modified-maps
                                                  default-column-reader)
                                     :label-fn str/lower-case
                                     :qualifier-fn identity))]
      (is (map? row))
      (is (not (contains? row (column :FRUIT/appearance))))
      (is (= 3 ((column :FRUIT/id) row)))
      (is (= "Peach" ((column :FRUIT/name) row))))))
next-jdbc-1.3.955/test/next/jdbc/plan_test.clj 0000664 0000000 0000000 00000005545 14700603111 0021100 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2020-2021 Sean Corfield, all rights reserved
(ns next.jdbc.plan-test
  "Tests for the plan helpers."
  (:require [clojure.test :refer [deftest is use-fixtures]]
            [next.jdbc.plan :as plan]
            [next.jdbc.specs :as specs]
            [next.jdbc.test-fixtures
             :refer [with-test-db ds]]
            [clojure.string :as str]))
(set! *warn-on-reflection* true)
;; around each test because of the folding tests using 1,000 rows
(use-fixtures :each with-test-db)
(specs/instrument)
;; plan/select-one! shapes a single row according to its second argument:
;; a vector of keys -> select-keys map, a keyword -> that column's value,
;; any other function -> the result of applying it to the row.
(deftest select-one!-tests
  (is (= {:id 1}
         (plan/select-one! (ds) [:id] ["select * from fruit order by id"])))
  (is (= 1
         (plan/select-one! (ds) :id ["select * from fruit order by id"])))
  (is (= "Banana"
         (plan/select-one! (ds) :name ["select * from fruit where id = ?" 2])))
  (is (= [1 "Apple"]
         (plan/select-one! (ds) (juxt :id :name)
                           ["select * from fruit order by id"])))
  (is (= {:id 1 :name "Apple"}
         (plan/select-one! (ds) #(select-keys % [:id :name])
                           ["select * from fruit order by id"]))))
;; plan/select! with no :into option returns a vector, shaped per-row by
;; the same column/keys/function conventions as select-one!.
(deftest select-vector-tests
  (is (= [{:id 1} {:id 2} {:id 3} {:id 4}]
         (plan/select! (ds) [:id] ["select * from fruit order by id"])))
  (is (= [1 2 3 4]
         (plan/select! (ds) :id ["select * from fruit order by id"])))
  (is (= ["Banana"]
         (plan/select! (ds) :name ["select * from fruit where id = ?" 2])))
  (is (= [[2 "Banana"]]
         (plan/select! (ds) (juxt :id :name)
                       ["select * from fruit where id = ?" 2])))
  (is (= [{:id 2 :name "Banana"}]
         (plan/select! (ds) [:id :name]
                       ["select * from fruit where id = ?" 2]))))
;; plan/select! with {:into #{}} pours the shaped rows into a set
;; instead of the default vector.
(deftest select-set-tests
  (let [order-by-id ["select * from fruit order by id"]]
    (is (= #{{:id 1} {:id 2} {:id 3} {:id 4}}
           (plan/select! (ds) [:id] order-by-id {:into #{}})))
    (is (= #{1 2 3 4}
           (plan/select! (ds) :id order-by-id {:into #{}})))))
;; plan/select! with {:into {}} and a (juxt key-fn val-fn) row shaper
;; builds a hash map from the result set.
(deftest select-map-tests
  (let [expected {1 "Apple", 2 "Banana", 3 "Peach", 4 "Orange"}
        actual   (plan/select! (ds) (juxt :id :name)
                               ["select * from fruit order by id"]
                               {:into {}})]
    (is (= expected actual))))
;; Regression test for issue #227: the :column-fn option must be applied
;; consistently whether columns are accessed via keyword (qualified or
;; not), string, or inside juxt/function shapers.
(deftest select-issue-227
  (is (= ["Apple"]
         (plan/select! (ds) :name ["select * from fruit where id = ?" 1]
                       {:column-fn #(str/replace % "-" "_")})))
  ;; qualified keyword: only the name part is mapped through :column-fn
  (is (= ["Apple"]
         (plan/select! (ds) :foo/name ["select * from fruit where id = ?" 1]
                       {:column-fn #(str/replace % "-" "_")})))
  ;; string access bypasses keyword conversion but must still find the column
  (is (= ["Apple"]
         (plan/select! (ds) #(get % "name") ["select * from fruit where id = ?" 1]
                       {:column-fn #(str/replace % "-" "_")})))
  (is (= [["Apple"]]
         (plan/select! (ds) (juxt :name) ["select * from fruit where id = ?" 1]
                       {:column-fn #(str/replace % "-" "_")}))))
next-jdbc-1.3.955/test/next/jdbc/prepare_test.clj 0000664 0000000 0000000 00000015116 14700603111 0021577 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2021 Sean Corfield, all rights reserved
(ns next.jdbc.prepare-test
  "Stub test namespace for PreparedStatement creation etc.
  Most of this functionality is core to all of the higher-level stuff
  so it gets tested that way.
  The tests for the deprecated version of `execute-batch!` are here
  as a guard against regressions."
  (:require [clojure.test :refer [deftest is testing use-fixtures]]
            [next.jdbc :as jdbc]
            [next.jdbc.test-fixtures
             :refer [with-test-db ds jtds? mssql? sqlite?]]
            [next.jdbc.prepare :as prep]
            [next.jdbc.specs :as specs]))
(set! *warn-on-reflection* true)
(use-fixtures :once with-test-db)
(specs/instrument)
;; The nine parameter groups used by every batch-insert scenario below.
(def ^:private batch-params
  [["fruit1" "one"]
   ["fruit2" "two"]
   ["fruit3" "three"]
   ["fruit4" "four"]
   ["fruit5" "five"]
   ["fruit6" "six"]
   ["fruit7" "seven"]
   ["fruit8" "eight"]
   ["fruit9" "nine"]])

(defn- insert-batch
  "Run a batched INSERT of `batch-params` into the fruit table inside a
  rollback-only transaction. `ps-opts` are passed to `jdbc/prepare` and
  `batch-opts` to `prep/execute-batch!`. Returns the per-statement results
  from `execute-batch!` with the post-insert row count conj'd on the end.
  Because the transaction is rolled back, the table is unchanged afterward."
  [ps-opts batch-opts]
  (jdbc/with-transaction [t (ds) {:rollback-only true}]
    (with-open [ps (jdbc/prepare t ["
INSERT INTO fruit (name, appearance) VALUES (?,?)
"]
                                 ps-opts)]
      (let [result (prep/execute-batch! ps batch-params batch-opts)]
        (conj result (count (jdbc/execute! t ["select * from fruit"])))))))

;; Guard against regressions in the (deprecated) prep/execute-batch!:
;; each scenario expects nine update counts of 1 plus a row count of 13
;; (4 seeded rows + 9 inserted), and the rollback must restore the table
;; to its original 4 rows.
(deftest execute-batch-tests
  (testing "simple batch insert"
    (is (= [1 1 1 1 1 1 1 1 1 13]
           (insert-batch {} {})))
    (is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"])))))
  (testing "small batch insert"
    ;; :batch-size smaller than the parameter count forces multiple batches
    (is (= [1 1 1 1 1 1 1 1 1 13]
           (insert-batch {} {:batch-size 3})))
    (is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"])))))
  (testing "big batch insert"
    ;; :batch-size larger than a full batch but smaller than the total
    (is (= [1 1 1 1 1 1 1 1 1 13]
           (insert-batch {} {:batch-size 8})))
    (is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"])))))
  (testing "large batch insert"
    ;; :large uses executeLargeBatch, unsupported by jTDS and SQLite
    (when-not (or (jtds?) (sqlite?))
      (is (= [1 1 1 1 1 1 1 1 1 13]
             (insert-batch {} {:batch-size 4 :large true})))
      (is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"]))))))
  (testing "return generated keys"
    (when-not (or (mssql?) (sqlite?))
      (let [results (insert-batch {:return-keys true}
                                  {:batch-size 4
                                   :return-generated-keys true})]
        (is (= 13 (last results)))
        (is (every? map? (butlast results)))
        ;; Derby and SQLite only return one generated key per batch so there
        ;; are only three keys, plus the overall count here:
        (is (< 3 (count results))))
      (is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"])))))))
next-jdbc-1.3.955/test/next/jdbc/protocols_test.clj 0000664 0000000 0000000 00000000513 14700603111 0022160 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2021 Sean Corfield, all rights reserved
(ns next.jdbc.protocols-test
  "Stub test namespace for low-level protocols. Nothing can really be tested
  at this level tho'..."
  (:require [clojure.test :refer [deftest is testing]]
            ;; :refer :all at least ensures the namespace loads and compiles
            [next.jdbc.protocols :refer :all]))
(set! *warn-on-reflection* true)
next-jdbc-1.3.955/test/next/jdbc/quoted_test.clj 0000664 0000000 0000000 00000002113 14700603111 0021433 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2021 Sean Corfield, all rights reserved
(ns next.jdbc.quoted-test
  "Basic tests for quoting strategies. These are also tested indirectly
  via the next.jdbc.sql tests."
  (:require [clojure.test :refer [deftest are testing]]
            [next.jdbc.quoted :refer [ansi mysql sql-server oracle postgres
                                      schema]]))
(set! *warn-on-reflection* true)
;; Each quoting strategy wraps a simple identifier in its SQL dialect's
;; delimiters (double quotes, backticks, or square brackets).
(deftest basic-quoting
  (doseq [[quote-fn quoted] [[ansi       "\"x\""]
                             [mysql      "`x`"]
                             [sql-server "[x]"]
                             [oracle     "\"x\""]
                             [postgres   "\"x\""]]]
    (is (= quoted (quote-fn "x")))))
;; A bare quoting strategy treats "x.y" as one identifier; wrapping it
;; with `schema` quotes the two dotted segments independently.
(deftest schema-quoting
  (let [dialects [[ansi       "\"x.y\"" "\"x\".\"y\""]
                  [mysql      "`x.y`"   "`x`.`y`"]
                  [sql-server "[x.y]"   "[x].[y]"]
                  [oracle     "\"x.y\"" "\"x\".\"y\""]
                  [postgres   "\"x.y\"" "\"x\".\"y\""]]]
    (testing "verify non-schema behavior"
      (doseq [[quote-fn plain _] dialects]
        (is (= plain (quote-fn "x.y")))))
    (testing "verify schema behavior"
      (doseq [[quote-fn _ dotted] dialects]
        (is (= dotted ((schema quote-fn) "x.y")))))))
next-jdbc-1.3.955/test/next/jdbc/result_set_test.clj 0000664 0000000 0000000 00000057740 14700603111 0022343 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2021 Sean Corfield, all rights reserved
(ns next.jdbc.result-set-test
  "Test namespace for the result set functions.
  What's left to be tested:
  * ReadableColumn protocol extension point"
  (:require [clojure.core.protocols :as core-p]
            [clojure.datafy :as d]
            [clojure.string :as str]
            [clojure.test :refer [deftest is testing use-fixtures]]
            [next.jdbc.protocols :as p]
            [next.jdbc.result-set :as rs]
            [next.jdbc.specs :as specs]
            [next.jdbc.test-fixtures :refer [with-test-db ds column
                                             default-options
                                             derby? mssql? mysql? postgres?]])
  (:import (java.sql ResultSet ResultSetMetaData)))
(set! *warn-on-reflection* true)
(use-fixtures :once with-test-db)
(specs/instrument)
;; Exercise datafy/nav over rows: navigating a foreign-key-like column
;; should fetch the referenced row (or rows). Covers the default schema
;; convention, custom :schema entries (*-to-1 and *-to-many), and the
;; legacy tuple form of schema entries.
(deftest test-datafy-nav
  (testing "default schema"
    (let [connectable (ds)
          test-row (rs/datafiable-row {:TABLE/FRUIT_ID 1} connectable
                                      (default-options))
          data (d/datafy test-row)
          v (get data :TABLE/FRUIT_ID)]
      ;; check datafication is sane
      (is (= 1 v))
      (let [object (d/nav data :table/fruit_id v)]
        ;; check nav produces a single map with the expected key/value data
        (is (= 1 ((column :FRUIT/ID) object)))
        (is (= "Apple" ((column :FRUIT/NAME) object))))))
  (testing "custom schema *-to-1"
    (let [connectable (ds)
          test-row (rs/datafiable-row {:foo/bar 2} connectable
                                      (assoc (default-options)
                                             :schema {:foo/bar :fruit/id}))
          data (d/datafy test-row)
          v (get data :foo/bar)]
      ;; check datafication is sane
      (is (= 2 v))
      (let [object (d/nav data :foo/bar v)]
        ;; check nav produces a single map with the expected key/value data
        (is (= 2 ((column :FRUIT/ID) object)))
        (is (= "Banana" ((column :FRUIT/NAME) object))))))
  (testing "custom schema *-to-many"
    ;; a vector-wrapped schema target means 'navigate to all matching rows'
    (let [connectable (ds)
          test-row (rs/datafiable-row {:foo/bar 3} connectable
                                      (assoc (default-options)
                                             :schema {:foo/bar [:fruit/id]}))
          data (d/datafy test-row)
          v (get data :foo/bar)]
      ;; check datafication is sane
      (is (= 3 v))
      (let [object (d/nav data :foo/bar v)]
        ;; check nav produces a result set with the expected key/value data
        (is (vector? object))
        (is (= 3 ((column :FRUIT/ID) (first object))))
        (is (= "Peach" ((column :FRUIT/NAME) (first object)))))))
  (testing "legacy schema tuples"
    ;; older [:table :column] / [:table :column :many] schema syntax
    (let [connectable (ds)
          test-row (rs/datafiable-row {:foo/bar 2} connectable
                                      (assoc (default-options)
                                             :schema {:foo/bar [:fruit :id]}))
          data (d/datafy test-row)
          v (get data :foo/bar)]
      ;; check datafication is sane
      (is (= 2 v))
      (let [object (d/nav data :foo/bar v)]
        ;; check nav produces a single map with the expected key/value data
        (is (= 2 ((column :FRUIT/ID) object)))
        (is (= "Banana" ((column :FRUIT/NAME) object)))))
    (let [connectable (ds)
          test-row (rs/datafiable-row {:foo/bar 3} connectable
                                      (assoc (default-options)
                                             :schema {:foo/bar [:fruit :id :many]}))
          data (d/datafy test-row)
          v (get data :foo/bar)]
      ;; check datafication is sane
      (is (= 3 v))
      (let [object (d/nav data :foo/bar v)]
        ;; check nav produces a result set with the expected key/value data
        (is (vector? object))
        (is (= 3 ((column :FRUIT/ID) (first object))))
        (is (= "Peach" ((column :FRUIT/NAME) (first object))))))))
;; Exercise the standard (non-optional) map builders: unlike the builders
;; in next.jdbc.optional, NULL columns are present in the built map with a
;; nil value. Also covers label/qualifier customization and the
;; as-maps-adapter column-reader wrapper (including wrapping an
;; already-adapted builder).
(deftest test-map-row-builder
  (testing "default row builder"
    (let [row (p/-execute-one (ds)
                              ["select * from fruit where id = ?" 1]
                              (default-options))]
      (is (map? row))
      ;; NULL column is present with a nil value
      (is (contains? row (column :FRUIT/GRADE)))
      (is (nil? ((column :FRUIT/GRADE) row)))
      (is (= 1 ((column :FRUIT/ID) row)))
      (is (= "Apple" ((column :FRUIT/NAME) row))))
    (let [rs (p/-execute-all (ds)
                             ["select * from fruit order by id"]
                             (default-options))]
      (is (every? map? rs))
      (is (= 1 ((column :FRUIT/ID) (first rs))))
      (is (= "Apple" ((column :FRUIT/NAME) (first rs))))
      (is (= 4 ((column :FRUIT/ID) (last rs))))
      (is (= "Orange" ((column :FRUIT/NAME) (last rs))))))
  (testing "unqualified row builder"
    (let [row (p/-execute-one (ds)
                              ["select * from fruit where id = ?" 2]
                              {:builder-fn rs/as-unqualified-maps})]
      (is (map? row))
      (is (contains? row (column :COST)))
      (is (nil? ((column :COST) row)))
      (is (= 2 ((column :ID) row)))
      (is (= "Banana" ((column :NAME) row)))))
  (testing "lower-case row builder"
    (let [row (p/-execute-one (ds)
                              ["select * from fruit where id = ?" 3]
                              (assoc (default-options)
                                     :builder-fn rs/as-lower-maps))]
      (is (map? row))
      (is (contains? row :fruit/appearance))
      (is (nil? (:fruit/appearance row)))
      (is (= 3 (:fruit/id row)))
      (is (= "Peach" (:fruit/name row)))))
  (testing "unqualified lower-case row builder"
    (let [row (p/-execute-one (ds)
                              ["select * from fruit where id = ?" 4]
                              {:builder-fn rs/as-unqualified-lower-maps})]
      (is (map? row))
      (is (= 4 (:id row)))
      (is (= "Orange" (:name row)))))
  (testing "kebab-case row builder"
    ;; underscores in labels become hyphens (looks_like -> looks-like)
    (let [row (p/-execute-one (ds)
                              ["select id,name,appearance as looks_like from fruit where id = ?" 3]
                              (assoc (default-options)
                                     :builder-fn rs/as-kebab-maps))]
      (is (map? row))
      (is (contains? row :fruit/looks-like))
      (is (nil? (:fruit/looks-like row)))
      (is (= 3 (:fruit/id row)))
      (is (= "Peach" (:fruit/name row)))))
  (testing "unqualified kebab-case row builder"
    (let [row (p/-execute-one (ds)
                              ["select id,name,appearance as looks_like from fruit where id = ?" 4]
                              {:builder-fn rs/as-unqualified-kebab-maps})]
      (is (map? row))
      (is (contains? row :looks-like))
      (is (= "juicy" (:looks-like row)))
      (is (= 4 (:id row)))
      (is (= "Orange" (:name row)))))
  (testing "custom row builder 1"
    (let [row (p/-execute-one (ds)
                              ["select fruit.*, id + 100 as newid from fruit where id = ?" 3]
                              (assoc (default-options)
                                     :builder-fn rs/as-modified-maps
                                     :label-fn str/lower-case
                                     :qualifier-fn identity))]
      (is (map? row))
      (is (contains? row (column :FRUIT/appearance)))
      (is (nil? ((column :FRUIT/appearance) row)))
      (is (= 3 ((column :FRUIT/id) row)))
      (is (= 103 (:newid row))) ; no table name here
      (is (= "Peach" ((column :FRUIT/name) row)))))
  (testing "custom row builder 2"
    (let [row (p/-execute-one (ds)
                              ["select fruit.*, id + 100 as newid from fruit where id = ?" 3]
                              (assoc (default-options)
                                     :builder-fn rs/as-modified-maps
                                     :label-fn str/lower-case
                                     :qualifier-fn (constantly "vegetable")))]
      (is (map? row))
      (is (contains? row :vegetable/appearance))
      (is (nil? (:vegetable/appearance row)))
      (is (= 3 (:vegetable/id row)))
      (is (= 103 (:vegetable/newid row))) ; constant qualifier here
      (is (= "Peach" (:vegetable/name row)))))
  (testing "adapted row builder"
    ;; as-maps-adapter replaces the column reading with a custom function
    ;; that switches on the column's SQL type
    (let [row (p/-execute-one (ds)
                              ["select * from fruit where id = ?" 3]
                              (assoc
                               (default-options)
                               :builder-fn (rs/as-maps-adapter
                                            rs/as-modified-maps
                                            (fn [^ResultSet rs
                                                 ^ResultSetMetaData rsmeta
                                                 ^Integer i]
                                              (condp = (.getColumnType rsmeta i)
                                                java.sql.Types/VARCHAR
                                                (.getString rs i)
                                                java.sql.Types/INTEGER
                                                (.getLong rs i)
                                                (.getObject rs i))))
                               :label-fn str/lower-case
                               :qualifier-fn identity))]
      (is (map? row))
      (is (contains? row (column :FRUIT/appearance)))
      (is (nil? ((column :FRUIT/appearance) row)))
      (is (= 3 ((column :FRUIT/id) row)))
      (is (= "Peach" ((column :FRUIT/name) row))))
    ;; wrapping an adapter in another adapter must also work
    (let [builder (rs/as-maps-adapter
                   rs/as-modified-maps
                   (fn [^ResultSet rs _ ^Integer i]
                     (.getObject rs i)))
          row (p/-execute-one (ds)
                              ["select * from fruit where id = ?" 3]
                              (assoc
                               (default-options)
                               :builder-fn (rs/as-maps-adapter
                                            builder
                                            (fn [^ResultSet rs
                                                 ^ResultSetMetaData rsmeta
                                                 ^Integer i]
                                              (condp = (.getColumnType rsmeta i)
                                                java.sql.Types/VARCHAR
                                                (.getString rs i)
                                                java.sql.Types/INTEGER
                                                (.getLong rs i)
                                                (.getObject rs i))))
                               :label-fn str/lower-case
                               :qualifier-fn identity))]
      (is (map? row))
      (is (contains? row (column :FRUIT/appearance)))
      (is (nil? ((column :FRUIT/appearance) row)))
      (is (= 3 ((column :FRUIT/id) row)))
      (is (= "Peach" ((column :FRUIT/name) row))))))
(deftest test-row-number
  ;; two notes here: we use as-arrays as a nod to issue #110 to make
  ;; sure that actually works; also Apache Derby is the only database
  ;; (that we test against) to restrict .getRow() calls to scroll cursors
  (testing "row-numbers on bare abstraction"
    (is (= [1 2 3]
           (into [] (map rs/row-number)
                 (p/-execute (ds) ["select * from fruit where id < ?" 4]
                             ;; we do not need a real builder here...
                             (cond-> {:builder-fn (constantly nil)}
                               (derby?)
                               (assoc :concurrency :read-only
                                      :cursors :close
                                      :result-type :scroll-insensitive)))))))
  (testing "row-numbers on realized row"
    (is (= [1 2 3]
           (into [] (comp (map #(rs/datafiable-row % (ds) {}))
                          (map rs/row-number))
                 (p/-execute (ds) ["select * from fruit where id < ?" 4]
                             ;; ...but datafiable-row requires a real builder
                             (cond-> {:builder-fn rs/as-arrays}
                               (derby?)
                               (assoc :concurrency :read-only
                                      :cursors :close
                                      :result-type :scroll-insensitive))))))))
;; rs/column-names should yield the five fruit columns whether called on
;; the bare row abstraction inside reduce or on a realized datafiable row.
;; Names are lower-cased for comparison since drivers differ in case.
(deftest test-column-names
  (testing "column-names on bare abstraction"
    (is (= #{"id" "appearance" "grade" "cost" "name"}
           (reduce (fn [_ row]
                     (-> row
                         (->> (rs/column-names)
                              (map (comp str/lower-case name))
                              (set)
                              (reduced))))
                   nil
                   (p/-execute (ds) ["select * from fruit where id < ?" 4]
                               ;; column-names require a real builder
                               {:builder-fn rs/as-arrays})))))
  (testing "column-names on realized row"
    (is (= #{"id" "appearance" "grade" "cost" "name"}
           (reduce (fn [_ row]
                     (-> row
                         (rs/datafiable-row (ds) {})
                         (->> (rs/column-names)
                              (map (comp str/lower-case name))
                              (set)
                              (reduced))))
                   nil
                   (p/-execute (ds) ["select * from fruit where id < ?" 4]
                               {:builder-fn rs/as-arrays}))))))
(deftest test-over-partition-all
  ;; this verifies that InspectableMapifiedResultSet survives partition-all
  (testing "row-numbers on partitioned rows"
    ;; NOTE(review): the row itself (%) is passed as datafiable-row's options
    ;; argument rather than {} -- presumably harmless since options are only
    ;; looked up by key, but confirm {} wasn't intended here
    (is (= [[1 2] [3 4]]
           (into [] (comp (map #(rs/datafiable-row % (ds) %))
                          (partition-all 2)
                          (map #(map rs/row-number %)))
                 (p/-execute (ds) ["select * from fruit"]
                             (cond-> {:builder-fn rs/as-arrays}
                               (derby?)
                               (assoc :concurrency :read-only
                                      :cursors :close
                                      :result-type :scroll-insensitive))))))))
;; The 'mapified' row abstraction inside reduce/plan supports map-like
;; access (keyword/string lookup, find, select-keys, contains?, get with
;; not-found, count, empty) WITHOUT building a real Clojure map; operations
;; that need a real map (assoc, dissoc, seq, cons, conj, =) force one to be
;; built, and datafiable-row realizes a map carrying datafy metadata.
(deftest test-mapify
  (testing "no row builder is used"
    (is (= [true]
           (into [] (map map?) ; it looks like a real map now
                 (p/-execute (ds) ["select * from fruit where id = ?" 1]
                             {:builder-fn (constantly nil)}))))
    (is (= ["Apple"]
           (into [] (map :name) ; keyword selection works
                 (p/-execute (ds) ["select * from fruit where id = ?" 1]
                             {:builder-fn (constantly nil)}))))
    (is (= [[2 [:name "Banana"]]]
           (into [] (map (juxt #(get % "id") ; get by string key works
                               #(find % :name))) ; get MapEntry works
                 (p/-execute (ds) ["select * from fruit where id = ?" 2]
                             {:builder-fn (constantly nil)}))))
    (is (= [{:id 3 :name "Peach"}]
           (into [] (map #(select-keys % [:id :name])) ; select-keys works
                 (p/-execute (ds) ["select * from fruit where id = ?" 3]
                             {:builder-fn (constantly nil)}))))
    (is (= [[:orange 4]]
           (into [] (map #(vector (if (contains? % :name) ; contains works
                                    (keyword (str/lower-case (:name %)))
                                    :unnamed)
                                  (get % :id 0))) ; get with not-found works
                 (p/-execute (ds) ["select * from fruit where id = ?" 4]
                             {:builder-fn (constantly nil)}))))
    (is (= [{}]
           (into [] (map empty) ; return empty map without building
                 (p/-execute (ds) ["select * from fruit where id = ?" 1]
                             {:builder-fn (constantly nil)})))))
  (testing "count does not build a map"
    (let [count-builder (fn [_1 _2]
                          (reify rs/RowBuilder
                            (column-count [_] 13)))]
      (is (= [13]
             (into [] (map count) ; count relies on columns, not row fields
                   (p/-execute (ds) ["select * from fruit where id = ?" 1]
                               {:builder-fn count-builder}))))))
  (testing "assoc, dissoc, cons, seq, and = build maps"
    (is (map? (reduce (fn [_ row] (reduced (assoc row :x 1)))
                      nil
                      (p/-execute (ds) ["select * from fruit"] {}))))
    ;; 5 columns + the assoc'd :x = 6 entries
    (is (= 6 (count (reduce (fn [_ row] (reduced (assoc row :x 1)))
                            nil
                            (p/-execute (ds) ["select * from fruit"] {})))))
    (is (map? (reduce (fn [_ row] (reduced
                                   (dissoc row (column :FRUIT/NAME))))
                      nil
                      (p/-execute (ds) ["select * from fruit"]
                                  (default-options)))))
    (is (= 4 (count (reduce (fn [_ row] (reduced
                                         (dissoc row (column :FRUIT/NAME))))
                            nil
                            (p/-execute (ds) ["select * from fruit"]
                                        (default-options))))))
    (is (seq? (reduce (fn [_ row] (reduced (seq row)))
                      nil
                      (p/-execute (ds) ["select * from fruit"] {}))))
    (is (seq? (reduce (fn [_ row] (reduced (cons :seq row)))
                      nil
                      (p/-execute (ds) ["select * from fruit"] {}))))
    (is (= :seq (first (reduce (fn [_ row] (reduced (cons :seq row)))
                               nil
                               (p/-execute (ds) ["select * from fruit"] {})))))
    (is (false? (reduce (fn [_ row] (reduced (= row {})))
                        nil
                        (p/-execute (ds) ["select * from fruit"] {}))))
    (is (map-entry? (second (reduce (fn [_ row] (reduced (cons :seq row)))
                                    nil
                                    (p/-execute (ds) ["select * from fruit"] {})))))
    (is (every? map-entry? (reduce (fn [_ row] (reduced (seq row)))
                                   nil
                                   (p/-execute (ds) ["select * from fruit"] {}))))
    (is (map? (reduce (fn [_ row] (reduced (conj row {:a 1})))
                      nil
                      (p/-execute (ds) ["select * from fruit"] {}))))
    (is (map? (reduce (fn [_ row] (reduced (conj row [:a 1])))
                      nil
                      (p/-execute (ds) ["select * from fruit"] {}))))
    (is (map? (reduce (fn [_ row] (reduced (conj row {:a 1 :b 2})))
                      nil
                      (p/-execute (ds) ["select * from fruit"] {}))))
    (is (= 1 (:a (reduce (fn [_ row] (reduced (conj row {:a 1})))
                         nil
                         (p/-execute (ds) ["select * from fruit"] {})))))
    (is (= 1 (:a (reduce (fn [_ row] (reduced (conj row [:a 1])))
                         nil
                         (p/-execute (ds) ["select * from fruit"] {})))))
    (is (= 1 (:a (reduce (fn [_ row] (reduced (conj row {:a 1 :b 2})))
                         nil
                         (p/-execute (ds) ["select * from fruit"] {})))))
    (is (= 2 (:b (reduce (fn [_ row] (reduced (conj row {:a 1 :b 2})))
                         nil
                         (p/-execute (ds) ["select * from fruit"] {})))))
    ;; with as-arrays, rows are vectors so conj appends
    (is (vector? (reduce (fn [_ row] (reduced (conj row :a)))
                         nil
                         (p/-execute (ds) ["select * from fruit"]
                                     {:builder-fn rs/as-arrays}))))
    (is (= :a (peek (reduce (fn [_ row] (reduced (conj row :a)))
                            nil
                            (p/-execute (ds) ["select * from fruit"]
                                        {:builder-fn rs/as-arrays})))))
    (is (= :b (peek (reduce (fn [_ row] (reduced (conj row :a :b)))
                            nil
                            (p/-execute (ds) ["select * from fruit"]
                                        {:builder-fn rs/as-arrays}))))))
  (testing "datafiable-row builds map; with metadata"
    (is (map? (reduce (fn [_ row] (reduced (rs/datafiable-row row (ds) {})))
                      nil
                      (p/-execute (ds) ["select * from fruit"] {}))))
    (is (contains? (meta (reduce (fn [_ row] (reduced (rs/datafiable-row row (ds) {})))
                                 nil
                                 (p/-execute (ds) ["select * from fruit"] {})))
                   `core-p/datafy))))
;; test that we can create a record-based result set builder:
;; (the fields mirror the columns of the fruit test table)
(defrecord Fruit [id name appearance cost grade])
;; A custom builder that constructs Fruit records directly from the
;; ResultSet by column label, bypassing the usual per-column iteration
;; (column-count is 0 so with-column is never used for real work).
(defn fruit-builder [^ResultSet rs ^ResultSetMetaData rsmeta]
  (reify
    rs/RowBuilder
    ;; read all five columns eagerly into a Fruit record
    (->row [_] (->Fruit (.getObject rs "id")
                        (.getObject rs "name")
                        (.getObject rs "appearance")
                        (.getObject rs "cost")
                        (.getObject rs "grade")))
    (column-count [_] 0) ; no need to iterate over columns
    (with-column [_ row i] row)
    (with-column-value [_ row col v] row)
    (row! [_ row] row)
    rs/ResultSetBuilder
    ;; accumulate rows into a transient vector
    (->rs [_] (transient []))
    (with-row [_ rs row] (conj! rs row))
    (rs! [_ rs] (persistent! rs))
    clojure.lang.ILookup ; only supports :cols and :rsmeta
    (valAt [this k] (get this k nil))
    (valAt [this k not-found]
      (case k
        :cols [:id :name :appearance :cost :grade]
        :rsmeta rsmeta
        not-found))))
;; verifies the record-based fruit-builder (defined above) works for both
;; single-row (-execute-one) and full (-execute-all) execution paths:
(deftest custom-map-builder
  (let [row (p/-execute-one (ds)
                            ["select * from fruit where appearance = ?" "red"]
                            {:builder-fn fruit-builder})]
    (is (instance? Fruit row))
    (is (= 1 (:id row))))
  (let [rs (p/-execute-all (ds)
                           ["select * from fruit where appearance = ?" "red"]
                           {:builder-fn fruit-builder})]
    ;; only one fruit in the fixture data is red (Apple, id 1):
    (is (every? #(instance? Fruit %) rs))
    (is (= 1 (count rs)))
    (is (= 1 (:id (first rs))))))
;; verifies that a ResultSet obtained from DatabaseMetaData (rather than a
;; query) can be turned into a datafiable result set:
(deftest metadata-result-set
  (let [metadata (with-open [con (p/get-connection (ds) {})]
                   (-> (.getMetaData con)
                       (.getTables nil nil nil (into-array ["TABLE" "VIEW"]))
                       (rs/datafiable-result-set (ds) {})))]
    (is (vector? metadata))
    (is (map? (first metadata)))
    ;; we should find :something/table_name with a value of "fruit"
    ;; may be upper/lower-case, could have any qualifier
    (is (some (fn [row]
                (some #(and (= "table_name" (-> % key name str/lower-case))
                            (= "fruit" (-> % val name str/lower-case)))
                      row))
              metadata))))
;; verifies clob-column-reader materializes CLOB columns as strings;
;; skipped on databases that have no CLOB type:
(deftest clob-reading
  (when-not (or (mssql?) (mysql?) (postgres?)) ; no clob in these
    (with-open [con (p/get-connection (ds) {})]
      ;; best-effort drop so the test is re-runnable:
      (try
        (p/-execute-one con ["DROP TABLE CLOBBER"] {})
        (catch Exception _))
      (p/-execute-one con [(str "
CREATE TABLE CLOBBER (
  ID INTEGER,
  STUFF CLOB
)")]
                      {})
      ;; NOTE(review): the concat yields ")values" with no space between
      ;; the column list and VALUES -- still valid SQL since ')' is a
      ;; token delimiter, but a space would read better:
      (p/-execute-one con
                      [(str "insert into clobber (id, stuff)"
                            "values (?,?), (?,?)")
                       1 "This is some long string"
                       2 "This is another long string"]
                      {})
      (is (= "This is some long string"
             (-> (p/-execute-all con
                                 ["select * from clobber where id = ?" 1]
                                 {:builder-fn (rs/as-maps-adapter
                                               rs/as-unqualified-lower-maps
                                               rs/clob-column-reader)})
                 (first)
                 :stuff))))))
;; verifies positional access (get / nth) on the reducible row abstraction
;; when rows are built as arrays:
(deftest test-get-n-array
  (testing "get n on bare abstraction over arrays"
    (is (= [1 2 3]
           (into [] (map #(get % 0))
                 (p/-execute (ds) ["select id from fruit where id < ?" 4]
                             {:builder-fn rs/as-arrays})))))
  (testing "nth on bare abstraction over arrays"
    (is (= [1 2 3]
           (into [] (map #(nth % 0))
                 (p/-execute (ds) ["select id from fruit where id < ?" 4]
                             {:builder-fn rs/as-arrays}))))))
next-jdbc-1.3.955/test/next/jdbc/specs_test.clj 0000664 0000000 0000000 00000000560 14700603111 0021253 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2021 Sean Corfield, all rights reserved
(ns next.jdbc.specs-test
"Stub test namespace for the specs.
The specs are used (and 'tested') as part of the tests for the
next.jdbc and next.jdbc.sql namespaces."
(:require [clojure.test :refer [deftest is testing]]
[next.jdbc.specs :refer :all]))
(set! *warn-on-reflection* true)
next-jdbc-1.3.955/test/next/jdbc/sql/ 0000775 0000000 0000000 00000000000 14700603111 0017203 5 ustar 00root root 0000000 0000000 next-jdbc-1.3.955/test/next/jdbc/sql/builder_test.clj 0000664 0000000 0000000 00000034245 14700603111 0022372 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2021 Sean Corfield, all rights reserved
(ns next.jdbc.sql.builder-test
"Tests for the SQL string building functions in next.jdbc.sql.builder."
(:require [clojure.test :refer [deftest is testing]]
[next.jdbc.quoted :refer [mysql sql-server]]
[next.jdbc.sql.builder :as builder]))
(set! *warn-on-reflection* true)
;; builder/by-keys: nil values become IS NULL in :where clauses but
;; placeholders in :set clauses; qualifiers are dropped unless :name-fn
;; is builder/qualified-name:
(deftest test-by-keys
  (testing ":where clause"
    (is (= (builder/by-keys {:a nil :b 42 :c "s"} :where {})
           ["WHERE a IS NULL AND b = ? AND c = ?" 42 "s"]))
    (is (= (builder/by-keys {:q/a nil :q/b 42 :q/c "s"} :where {})
           ["WHERE a IS NULL AND b = ? AND c = ?" 42 "s"]))
    (is (= (builder/by-keys {:q/a nil :q/b 42 :q/c "s"} :where
                            {:name-fn builder/qualified-name
                             :column-fn mysql})
           ["WHERE `q/a` IS NULL AND `q/b` = ? AND `q/c` = ?" 42 "s"])))
  (testing ":set clause"
    (is (= (builder/by-keys {:a nil :b 42 :c "s"} :set {})
           ["SET a = ?, b = ?, c = ?" nil 42 "s"]))
    (is (= (builder/by-keys {:q/a nil :q/b 42 :q/c "s"} :set {})
           ["SET a = ?, b = ?, c = ?" nil 42 "s"]))
    (is (= (builder/by-keys {:q/a nil :q/b 42 :q/c "s"} :set
                            {:name-fn builder/qualified-name
                             :column-fn mysql})
           ["SET `q/a` = ?, `q/b` = ?, `q/c` = ?" nil 42 "s"]))))
;; builder/as-cols: plain keywords, [col alias] pairs, and raw SQL string
;; expressions (which are never quoted); qualifiers dropped unless
;; :name-fn is builder/qualified-name:
(deftest test-as-cols
  (is (= (builder/as-cols [:a :b :c] {})
         "a, b, c"))
  (is (= (builder/as-cols [[:a :aa] :b ["count(*)" :c]] {})
         "a AS aa, b, count(*) AS c"))
  (is (= (builder/as-cols [[:a :aa] :b ["count(*)" :c]] {:column-fn mysql})
         "`a` AS `aa`, `b`, count(*) AS `c`"))
  (is (= (builder/as-cols [:q/a :q/b :q/c] {})
         "a, b, c"))
  (is (= (builder/as-cols [[:q/a :q/aa] :q/b ["count(*)" :q/c]] {})
         "a AS aa, b, count(*) AS c"))
  (is (= (builder/as-cols [[:q/a :q/aa] :q/b ["count(*)" :q/c]] {:column-fn mysql})
         "`a` AS `aa`, `b`, count(*) AS `c`"))
  (is (= (builder/as-cols [:q/a :q/b :q/c]
                          {:name-fn builder/qualified-name
                           :column-fn mysql})
         "`q/a`, `q/b`, `q/c`"))
  (is (= (builder/as-cols [[:q/a :q/aa] :q/b ["count(*)" :q/c]]
                          {:name-fn builder/qualified-name
                           :column-fn mysql})
         "`q/a` AS `q/aa`, `q/b`, count(*) AS `q/c`")))
;; builder/as-keys: comma-joined column list from a map's keys:
(deftest test-as-keys
  (is (= (builder/as-keys {:a nil :b 42 :c "s"} {})
         "a, b, c"))
  (is (= (builder/as-keys {:q/a nil :q/b 42 :q/c "s"} {})
         "a, b, c"))
  (is (= (builder/as-keys {:q/a nil :q/b 42 :q/c "s"}
                          {:name-fn builder/qualified-name
                           :column-fn sql-server})
         "[q/a], [q/b], [q/c]")))
;; builder/as-?: one placeholder per key, independent of naming options:
(deftest test-as-?
  (is (= (builder/as-? {:a nil :b 42 :c "s"} {})
         "?, ?, ?"))
  (is (= (builder/as-? {:q/a nil :q/b 42 :q/c "s"} {})
         "?, ?, ?"))
  (is (= (builder/as-? {:q/a nil :q/b 42 :q/c "s"}
                       {:name-fn builder/qualified-name
                        :column-fn sql-server})
         "?, ?, ?")))
;; builder/for-query: SELECT generation from example maps, where-clause
;; vectors, and :all, plus :order-by, :suffix, :top (SQL Server style),
;; :limit/:offset (MySQL style) and :offset/:fetch (ANSI style) options:
(deftest test-for-query
  (testing "by example"
    (is (= (builder/for-query
            :user
            {:id 9}
            {:table-fn sql-server :column-fn mysql :order-by [:a [:b :desc]]})
           ["SELECT * FROM [user] WHERE `id` = ? ORDER BY `a`, `b` DESC" 9]))
    (is (= (builder/for-query :user {:id nil} {:table-fn sql-server :column-fn mysql})
           ["SELECT * FROM [user] WHERE `id` IS NULL"]))
    (is (= (builder/for-query :user
                              {:id nil}
                              {:table-fn sql-server :column-fn mysql
                               :suffix "FOR UPDATE"})
           ["SELECT * FROM [user] WHERE `id` IS NULL FOR UPDATE"]))
    ;; qualified keywords: qualifier dropped by default...
    (is (= (builder/for-query
            :t/user
            {:q/id 9}
            {:table-fn sql-server :column-fn mysql :order-by [:x/a [:x/b :desc]]})
           ["SELECT * FROM [user] WHERE `id` = ? ORDER BY `a`, `b` DESC" 9]))
    (is (= (builder/for-query :t/user {:q/id nil} {:table-fn sql-server :column-fn mysql})
           ["SELECT * FROM [user] WHERE `id` IS NULL"]))
    (is (= (builder/for-query :t/user
                              {:q/id nil}
                              {:table-fn sql-server :column-fn mysql
                               :suffix "FOR UPDATE"})
           ["SELECT * FROM [user] WHERE `id` IS NULL FOR UPDATE"]))
    ;; ...but preserved with :name-fn builder/qualified-name:
    (is (= (builder/for-query
            :t/user
            {:q/id 9}
            {:table-fn sql-server :column-fn mysql :order-by [:x/a [:x/b :desc]]
             :name-fn builder/qualified-name})
           ["SELECT * FROM [t/user] WHERE `q/id` = ? ORDER BY `x/a`, `x/b` DESC" 9]))
    (is (= (builder/for-query :t/user {:q/id nil}
                              {:table-fn sql-server :column-fn mysql
                               :name-fn builder/qualified-name})
           ["SELECT * FROM [t/user] WHERE `q/id` IS NULL"]))
    (is (= (builder/for-query :t/user
                              {:q/id nil}
                              {:table-fn sql-server :column-fn mysql
                               :name-fn builder/qualified-name
                               :suffix "FOR UPDATE"})
           ["SELECT * FROM [t/user] WHERE `q/id` IS NULL FOR UPDATE"])))
  (testing "by where clause"
    (is (= (builder/for-query
            :user
            ["id = ? and opt is null" 9]
            {:table-fn sql-server :column-fn mysql :order-by [:a [:b :desc]]})
           [(str "SELECT * FROM [user] WHERE id = ? and opt is null"
                 " ORDER BY `a`, `b` DESC") 9])))
  (testing "by :all"
    (is (= (builder/for-query
            :user
            :all
            {:table-fn sql-server :column-fn mysql :order-by [:a [:b :desc]]})
           ["SELECT * FROM [user] ORDER BY `a`, `b` DESC"])))
  (testing "top N"
    (is (= (builder/for-query
            :user
            {:id 9}
            {:table-fn sql-server :column-fn mysql :order-by [:a [:b :desc]]
             :top 42})
           ["SELECT TOP ? * FROM [user] WHERE `id` = ? ORDER BY `a`, `b` DESC"
            42 9])))
  (testing "limit"
    (testing "without offset"
      (is (= (builder/for-query
              :user
              {:id 9}
              {:table-fn sql-server :column-fn mysql :order-by [:a [:b :desc]]
               :limit 42})
             [(str "SELECT * FROM [user] WHERE `id` = ?"
                   " ORDER BY `a`, `b` DESC LIMIT ?")
              9 42])))
    (testing "with offset"
      (is (= (builder/for-query
              :user
              {:id 9}
              {:table-fn sql-server :column-fn mysql :order-by [:a [:b :desc]]
               :limit 42 :offset 13})
             [(str "SELECT * FROM [user] WHERE `id` = ?"
                   " ORDER BY `a`, `b` DESC LIMIT ? OFFSET ?")
              9 42 13]))))
  (testing "offset"
    (testing "without fetch"
      (is (= (builder/for-query
              :user
              {:id 9}
              {:table-fn sql-server :column-fn mysql :order-by [:a [:b :desc]]
               :offset 13})
             [(str "SELECT * FROM [user] WHERE `id` = ?"
                   " ORDER BY `a`, `b` DESC OFFSET ? ROWS")
              9 13])))
    (testing "with fetch"
      (is (= (builder/for-query
              :user
              {:id 9}
              {:table-fn sql-server :column-fn mysql :order-by [:a [:b :desc]]
               :offset 13 :fetch 42})
             [(str "SELECT * FROM [user] WHERE `id` = ?"
                   " ORDER BY `a`, `b` DESC OFFSET ? ROWS FETCH NEXT ? ROWS ONLY")
              9 13 42])))))
;; builder/for-delete: DELETE generation from example maps and where
;; vectors; qualifiers dropped unless :name-fn is qualified-name:
(deftest test-for-delete
  (testing "by example"
    (is (= (builder/for-delete
            :user
            {:opt nil :id 9}
            {:table-fn sql-server :column-fn mysql})
           ["DELETE FROM [user] WHERE `opt` IS NULL AND `id` = ?" 9]))
    (is (= (builder/for-delete
            :t/user
            {:q/opt nil :q/id 9}
            {:table-fn sql-server :column-fn mysql})
           ["DELETE FROM [user] WHERE `opt` IS NULL AND `id` = ?" 9]))
    (is (= (builder/for-delete
            :t/user
            {:q/opt nil :q/id 9}
            {:table-fn sql-server :column-fn mysql
             :name-fn builder/qualified-name})
           ["DELETE FROM [t/user] WHERE `q/opt` IS NULL AND `q/id` = ?" 9])))
  (testing "by where clause"
    (is (= (builder/for-delete
            :user
            ["id = ? and opt is null" 9]
            {:table-fn sql-server :column-fn mysql})
           ["DELETE FROM [user] WHERE id = ? and opt is null" 9]))
    (is (= (builder/for-delete
            :t/user
            ["id = ? and opt is null" 9]
            {:table-fn sql-server :column-fn mysql})
           ["DELETE FROM [user] WHERE id = ? and opt is null" 9]))
    (is (= (builder/for-delete
            :t/user
            ["id = ? and opt is null" 9]
            {:table-fn sql-server :column-fn mysql
             :name-fn builder/qualified-name})
           ["DELETE FROM [t/user] WHERE id = ? and opt is null" 9]))))
;; builder/for-update: UPDATE generation; an empty where-example asserts
;; (see issue #44), and nil SET values become ? with a nil parameter:
(deftest test-for-update
  (testing "empty example (would be a SQL error)"
    (is (thrown? AssertionError ; changed in #44
                 (builder/for-update :user
                                     {:status 42}
                                     {}
                                     {:table-fn sql-server :column-fn mysql}))))
  (testing "by example"
    (is (= (builder/for-update :user
                               {:status 42}
                               {:id 9}
                               {:table-fn sql-server :column-fn mysql})
           ["UPDATE [user] SET `status` = ? WHERE `id` = ?" 42 9]))
    (is (= (builder/for-update :t/user
                               {:q/status 42}
                               {:q/id 9}
                               {:table-fn sql-server :column-fn mysql})
           ["UPDATE [user] SET `status` = ? WHERE `id` = ?" 42 9]))
    (is (= (builder/for-update :t/user
                               {:q/status 42}
                               {:q/id 9}
                               {:table-fn sql-server :column-fn mysql
                                :name-fn builder/qualified-name})
           ["UPDATE [t/user] SET `q/status` = ? WHERE `q/id` = ?" 42 9])))
  (testing "by where clause, with nil set value"
    (is (= (builder/for-update :user
                               {:status 42, :opt nil}
                               ["id = ?" 9]
                               {:table-fn sql-server :column-fn mysql})
           ["UPDATE [user] SET `status` = ?, `opt` = ? WHERE id = ?" 42 nil 9]))))
;; builder/for-insert and for-insert-multi: single-row inserts, multi-row
;; VALUES lists, and :batch mode (single-row SQL plus per-row parameter
;; groups); qualifiers dropped unless :name-fn is qualified-name:
(deftest test-for-inserts
  (testing "single insert"
    (is (= (builder/for-insert :user
                               {:id 9 :status 42 :opt nil}
                               {:table-fn sql-server :column-fn mysql})
           ["INSERT INTO [user] (`id`, `status`, `opt`) VALUES (?, ?, ?)" 9 42 nil]))
    (is (= (builder/for-insert :t/user
                               {:q/id 9 :q/status 42 :q/opt nil}
                               {:table-fn sql-server :column-fn mysql})
           ["INSERT INTO [user] (`id`, `status`, `opt`) VALUES (?, ?, ?)" 9 42 nil]))
    (is (= (builder/for-insert :t/user
                               {:q/id 9 :q/status 42 :q/opt nil}
                               {:table-fn sql-server :column-fn mysql
                                :name-fn builder/qualified-name})
           ["INSERT INTO [t/user] (`q/id`, `q/status`, `q/opt`) VALUES (?, ?, ?)" 9 42 nil])))
  (testing "multi-row insert (normal mode)"
    (is (= (builder/for-insert-multi :user
                                     [:id :status]
                                     [[42 "hello"]
                                      [35 "world"]
                                      [64 "dollars"]]
                                     {:table-fn sql-server :column-fn mysql})
           ["INSERT INTO [user] (`id`, `status`) VALUES (?, ?), (?, ?), (?, ?)" 42 "hello" 35 "world" 64 "dollars"]))
    (is (= (builder/for-insert-multi :t/user
                                     [:q/id :q/status]
                                     [[42 "hello"]
                                      [35 "world"]
                                      [64 "dollars"]]
                                     {:table-fn sql-server :column-fn mysql})
           ["INSERT INTO [user] (`id`, `status`) VALUES (?, ?), (?, ?), (?, ?)" 42 "hello" 35 "world" 64 "dollars"]))
    (is (= (builder/for-insert-multi :t/user
                                     [:q/id :q/status]
                                     [[42 "hello"]
                                      [35 "world"]
                                      [64 "dollars"]]
                                     {:table-fn sql-server :column-fn mysql
                                      :name-fn builder/qualified-name})
           ["INSERT INTO [t/user] (`q/id`, `q/status`) VALUES (?, ?), (?, ?), (?, ?)" 42 "hello" 35 "world" 64 "dollars"])))
  (testing "multi-row insert (batch mode)"
    ;; batch mode: one parameter group per row instead of inline params:
    (is (= (builder/for-insert-multi :user
                                     [:id :status]
                                     [[42 "hello"]
                                      [35 "world"]
                                      [64 "dollars"]]
                                     {:table-fn sql-server :column-fn mysql :batch true})
           ["INSERT INTO [user] (`id`, `status`) VALUES (?, ?)" [42 "hello"] [35 "world"] [64 "dollars"]]))
    (is (= (builder/for-insert-multi :t/user
                                     [:q/id :q/status]
                                     [[42 "hello"]
                                      [35 "world"]
                                      [64 "dollars"]]
                                     {:table-fn sql-server :column-fn mysql :batch true})
           ["INSERT INTO [user] (`id`, `status`) VALUES (?, ?)" [42 "hello"] [35 "world"] [64 "dollars"]]))
    (is (= (builder/for-insert-multi :t/user
                                     [:q/id :q/status]
                                     [[42 "hello"]
                                      [35 "world"]
                                      [64 "dollars"]]
                                     {:table-fn sql-server :column-fn mysql :batch true
                                      :name-fn builder/qualified-name})
           ["INSERT INTO [t/user] (`q/id`, `q/status`) VALUES (?, ?)" [42 "hello"] [35 "world"] [64 "dollars"]]))))
next-jdbc-1.3.955/test/next/jdbc/sql_test.clj 0000664 0000000 0000000 00000026415 14700603111 0020744 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2024 Sean Corfield, all rights reserved
(ns next.jdbc.sql-test
"Tests for the syntactic sugar SQL functions."
(:require [clojure.test :refer [deftest is testing use-fixtures]]
[next.jdbc :as jdbc]
[next.jdbc.specs :as specs]
[next.jdbc.sql :as sql]
[next.jdbc.test-fixtures
:refer [with-test-db ds column default-options
derby? jtds? maria? mssql? mysql? postgres? sqlite?]]
[next.jdbc.types :refer [as-other as-real as-varchar]]))
(set! *warn-on-reflection* true)
(use-fixtures :once with-test-db)
(specs/instrument)
;; sql/query over the four-row fruit fixture; (column ...) adapts the
;; expected keyword to the current database's case/qualifier rules:
(deftest test-query
  (let [ds-opts (jdbc/with-options (ds) (default-options))
        rs      (sql/query ds-opts ["select * from fruit order by id"])]
    (is (= 4 (count rs)))
    (is (every? map? rs))
    (is (every? meta rs))
    (is (= 1 ((column :FRUIT/ID) (first rs))))
    (is (= 4 ((column :FRUIT/ID) (last rs))))))
;; sql/find-by-keys with :all plus pagination (:limit/:offset on
;; MySQL/SQLite, :offset/:fetch elsewhere) and computed :columns:
(deftest test-find-all-offset
  (let [ds-opts (jdbc/with-options (ds) (default-options))
        rs      (sql/find-by-keys
                 ds-opts :fruit :all
                 (assoc
                  (if (or (mysql?) (sqlite?))
                    {:limit 2 :offset 1}
                    {:offset 1 :fetch 2})
                  :columns [:ID
                            ["CASE WHEN grade > 91 THEN 'ok ' ELSE 'bad' END"
                             :QUALITY]]
                  :order-by [:id]))]
    ;; skipping row 1 and taking 2 yields ids 2 and 3:
    (is (= 2 (count rs)))
    (is (every? map? rs))
    (is (every? meta rs))
    (is (every? #(= 2 (count %)) rs))
    (is (= 2 ((column :FRUIT/ID) (first rs))))
    (is (= "ok " ((column :QUALITY) (first rs))))
    (is (= 3 ((column :FRUIT/ID) (last rs))))
    (is (= "bad" ((column :QUALITY) (last rs))))))
;; sql/find-by-keys with an example map: no match yields an empty vector,
;; a match yields hash maps with metadata:
(deftest test-find-by-keys
  (let [ds-opts (jdbc/with-options (ds) (default-options))]
    (let [rs (sql/find-by-keys ds-opts :fruit {:appearance "neon-green"})]
      (is (vector? rs))
      (is (= [] rs)))
    (let [rs (sql/find-by-keys ds-opts :fruit {:appearance "yellow"})]
      (is (= 1 (count rs)))
      (is (every? map? rs))
      (is (every? meta rs))
      (is (= 2 ((column :FRUIT/ID) (first rs)))))))
;; sql/aggregate-by-keys: single aggregate value per call; supplying
;; :columns alongside an aggregate is rejected:
(deftest test-aggregate-by-keys
  (let [ds-opts (jdbc/with-options (ds) (default-options))]
    (let [count-v (sql/aggregate-by-keys ds-opts :fruit "count(*)" {:appearance "neon-green"})]
      (is (number? count-v))
      (is (= 0 count-v)))
    (let [count-v (sql/aggregate-by-keys ds-opts :fruit "count(*)" {:appearance "yellow"})]
      (is (= 1 count-v)))
    (let [count-v (sql/aggregate-by-keys ds-opts :fruit "count(*)" :all)]
      (is (= 4 count-v)))
    (let [max-id (sql/aggregate-by-keys ds-opts :fruit "max(id)" :all)]
      (is (= 4 max-id)))
    (let [min-name (sql/aggregate-by-keys ds-opts :fruit "min(name)" :all)]
      (is (= "Apple" min-name)))
    (is (thrown? IllegalArgumentException
                 (sql/aggregate-by-keys ds-opts :fruit "count(*)" :all {:columns []})))))
;; sql/get-by-id: lookup by primary key (default :id) and by an explicit
;; key column (unqualified and qualified); nil when no row matches:
(deftest test-get-by-id
  (let [ds-opts (jdbc/with-options (ds) (default-options))]
    (is (nil? (sql/get-by-id ds-opts :fruit -1)))
    (let [row (sql/get-by-id ds-opts :fruit 3)]
      (is (map? row))
      (is (= "Peach" ((column :FRUIT/NAME) row))))
    (let [row (sql/get-by-id ds-opts :fruit "juicy" :appearance {})]
      (is (map? row))
      (is (= 4 ((column :FRUIT/ID) row)))
      (is (= "Orange" ((column :FRUIT/NAME) row))))
    (let [row (sql/get-by-id ds-opts :fruit "Banana" :FRUIT/NAME {})]
      (is (map? row))
      (is (= 2 ((column :FRUIT/ID) row))))))
;; sql/update! by example map and by where vector; each change is undone
;; in a finally block so the shared fixture data stays intact:
(deftest test-update!
  (let [ds-opts (jdbc/with-options (ds) (default-options))]
    (try
      (is (= {:next.jdbc/update-count 1}
             (sql/update! ds-opts :fruit {:appearance "brown"} {:id 2})))
      (is (= "brown" ((column :FRUIT/APPEARANCE)
                      (sql/get-by-id ds-opts :fruit 2))))
      (finally
        ;; restore the fixture row for subsequent tests:
        (sql/update! ds-opts :fruit {:appearance "yellow"} {:id 2})))
    (try
      (is (= {:next.jdbc/update-count 1}
             (sql/update! ds-opts :fruit {:appearance "green"}
                          ["name = ?" "Banana"])))
      (is (= "green" ((column :FRUIT/APPEARANCE)
                      (sql/get-by-id ds-opts :fruit 2))))
      (finally
        (sql/update! ds-opts :fruit {:appearance "yellow"} {:id 2})))))
;; sql/insert!, insert-multi! and delete!: the generated-key column name
;; (and multi-insert key behavior) varies by driver, so new-key is chosen
;; per database. The testing blocks are order-dependent: each sub-test
;; inserts rows (ids continue the auto-increment sequence: 5, then 6-8,
;; then 9-11, then 12-14) and deletes them back down to the 4 fixture rows.
(deftest test-insert-delete
  (let [new-key (cond (derby?)    :1
                      (jtds?)     :ID
                      (maria?)    :insert_id
                      (mssql?)    :GENERATED_KEYS
                      (mysql?)    :GENERATED_KEY
                      (postgres?) :fruit/id
                      :else       :FRUIT/ID)]
    (testing "single insert/delete"
      (is (== 5 (new-key (sql/insert! (ds) :fruit
                                      {:name (as-varchar "Kiwi")
                                       :appearance "green & fuzzy"
                                       :cost 100 :grade (as-real 99.9)}
                                      {:suffix
                                       (when (sqlite?)
                                         "RETURNING *")}))))
      (is (= 5 (count (sql/query (ds) ["select * from fruit"]))))
      (is (= {:next.jdbc/update-count 1}
             (sql/delete! (ds) :fruit {:id 5})))
      (is (= 4 (count (sql/query (ds) ["select * from fruit"])))))
    (testing "multiple insert/delete"
      ;; drivers disagree on which generated keys come back from a
      ;; multi-row insert: Derby yields [nil], MS SQL only the last key
      ;; (as BigDecimal), MariaDB only the first:
      (is (= (cond (derby?)
                   [nil] ; Derby returns no usable generated keys here
                   (mssql?)
                   [8M]
                   (maria?)
                   [6]
                   :else
                   [6 7 8])
             (mapv new-key
                   (sql/insert-multi! (ds) :fruit
                                      [:name :appearance :cost :grade]
                                      [["Kiwi" "green & fuzzy" 100 99.9]
                                       ["Grape" "black" 10 50]
                                       ["Lemon" "yellow" 20 9.9]]
                                      {:suffix
                                       (when (sqlite?)
                                         "RETURNING *")}))))
      (is (= 7 (count (sql/query (ds) ["select * from fruit"]))))
      (is (= {:next.jdbc/update-count 1}
             (sql/delete! (ds) :fruit {:id 6})))
      (is (= 6 (count (sql/query (ds) ["select * from fruit"]))))
      (is (= {:next.jdbc/update-count 2}
             (sql/delete! (ds) :fruit ["id > ?" 4])))
      (is (= 4 (count (sql/query (ds) ["select * from fruit"])))))
    (testing "multiple insert/delete with sequential cols/rows" ; per #43
      (is (= (cond (derby?)
                   [nil] ; Derby returns no usable generated keys here
                   (mssql?)
                   [11M]
                   (maria?)
                   [9]
                   :else
                   [9 10 11])
             (mapv new-key
                   (sql/insert-multi! (ds) :fruit
                                      '(:name :appearance :cost :grade)
                                      '(("Kiwi" "green & fuzzy" 100 99.9)
                                        ("Grape" "black" 10 50)
                                        ("Lemon" "yellow" 20 9.9))
                                      {:suffix
                                       (when (sqlite?)
                                         "RETURNING *")}))))
      (is (= 7 (count (sql/query (ds) ["select * from fruit"]))))
      (is (= {:next.jdbc/update-count 1}
             (sql/delete! (ds) :fruit {:id 9})))
      (is (= 6 (count (sql/query (ds) ["select * from fruit"]))))
      (is (= {:next.jdbc/update-count 2}
             (sql/delete! (ds) :fruit ["id > ?" 4])))
      (is (= 4 (count (sql/query (ds) ["select * from fruit"])))))
    (testing "multiple insert/delete with maps"
      (is (= (cond (derby?)
                   [nil] ; Derby returns no usable generated keys here
                   (mssql?)
                   [14M]
                   (maria?)
                   [12]
                   :else
                   [12 13 14])
             (mapv new-key
                   (sql/insert-multi! (ds) :fruit
                                      [{:name "Kiwi"
                                        :appearance "green & fuzzy"
                                        :cost 100
                                        :grade 99.9}
                                       {:name "Grape"
                                        :appearance "black"
                                        :cost 10
                                        :grade 50}
                                       {:name "Lemon"
                                        :appearance "yellow"
                                        :cost 20
                                        :grade 9.9}]
                                      {:suffix
                                       (when (sqlite?)
                                         "RETURNING *")}))))
      (is (= 7 (count (sql/query (ds) ["select * from fruit"]))))
      (is (= {:next.jdbc/update-count 1}
             (sql/delete! (ds) :fruit {:id 12})))
      (is (= 6 (count (sql/query (ds) ["select * from fruit"]))))
      (is (= {:next.jdbc/update-count 2}
             (sql/delete! (ds) :fruit ["id > ?" 10])))
      (is (= 4 (count (sql/query (ds) ["select * from fruit"])))))
    (testing "empty insert-multi!" ; per #44 and #264
      (is (= [] (sql/insert-multi! (ds) :fruit
                                   [:name :appearance :cost :grade]
                                   []
                                   {:suffix
                                    (when (sqlite?)
                                      "RETURNING *")})))
      ;; per #264 the following should all be legal too:
      (is (= [] (sql/insert-multi! (ds) :fruit
                                   []
                                   {:suffix
                                    (when (sqlite?)
                                      "RETURNING *")})))
      (is (= [] (sql/insert-multi! (ds) :fruit
                                   []
                                   []
                                   {:suffix
                                    (when (sqlite?)
                                      "RETURNING *")})))
      (is (= [] (sql/insert-multi! (ds) :fruit [])))
      (is (= [] (sql/insert-multi! (ds) :fruit [] []))))))
;; empty example maps are rejected (they would generate invalid SQL):
(deftest no-empty-example-maps
  (is (thrown? clojure.lang.ExceptionInfo
               (sql/find-by-keys (ds) :fruit {})))
  (is (thrown? clojure.lang.ExceptionInfo
               (sql/update! (ds) :fruit {} {})))
  (is (thrown? clojure.lang.ExceptionInfo
               (sql/delete! (ds) :fruit {}))))
;; an empty column list with non-empty rows is rejected:
(deftest no-empty-columns
  (is (thrown? clojure.lang.ExceptionInfo
               (sql/insert-multi! (ds) :fruit [] [[] [] []]))))
;; map-based insert-multi! requires all maps to share the same keys:
(deftest no-mismatched-columns
  (is (thrown? IllegalArgumentException
               (sql/insert-multi! (ds) :fruit [{:name "Apple"} {:cost 1.23}]))))
;; an empty :order-by vector is rejected:
(deftest no-empty-order-by
  (is (thrown? clojure.lang.ExceptionInfo
               (sql/find-by-keys (ds) :fruit
                                 {:name "Apple"}
                                 {:order-by []}))))
;; PostgreSQL-only: array parameter via "= any(?)" in a where vector:
(deftest array-in
  (when (postgres?)
    (let [data (sql/find-by-keys (ds) :fruit ["id = any(?)" (int-array [1 2 3 4])])]
      (is (= 4 (count data))))))
;; PostgreSQL-only: insert into an enum-typed column via as-other
;; (the LANGUAGE enum and lang_test table are created in with-test-db):
(deftest enum-pg
  (when (postgres?)
    (let [r (sql/insert! (ds) :lang-test {:lang (as-other "fr")}
                         jdbc/snake-kebab-opts)]
      (is (= {:lang-test/lang "fr"} r)))))
next-jdbc-1.3.955/test/next/jdbc/test_fixtures.clj 0000664 0000000 0000000 00000022706 14700603111 0022015 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2021 Sean Corfield, all rights reserved
(ns next.jdbc.test-fixtures
"Multi-database testing fixtures."
(:require [clojure.string :as str]
[next.jdbc :as jdbc]
[next.jdbc.prepare :as prep]
[next.jdbc.sql :as sql])
(:import (io.zonky.test.db.postgres.embedded EmbeddedPostgres)))
(set! *warn-on-reflection* true)
;; db-specs for the file/embedded databases that are always tested:
(def ^:private test-derby {:dbtype "derby" :dbname "clojure_test_derby" :create true})
(def ^:private test-h2-mem {:dbtype "h2:mem" :dbname "clojure_test_h2_mem"})
(def ^:private test-h2 {:dbtype "h2" :dbname "clojure_test_h2"})
(def ^:private test-hsql {:dbtype "hsqldb" :dbname "clojure_test_hsqldb"})
(def ^:private test-sqlite {:dbtype "sqlite" :dbname "clojure_test_sqlite"})
;; this is just a dummy db-spec -- it's handled in with-test-db below
(def ^:private test-postgres-map {:dbtype "embedded-postgres" :dbname "clojure_test"})
;; nil (skipping embedded PostgreSQL) when NEXT_JDBC_NO_POSTGRES is set:
(def ^:private test-postgres
  (when-not (System/getenv "NEXT_JDBC_NO_POSTGRES") test-postgres-map))
;; it takes a while to spin up so we kick it off at startup
(defonce embedded-pg (when test-postgres (future (EmbeddedPostgres/start))))
;; MySQL/MariaDB spec: NEXT_JDBC_TEST_MARIADB selects the MariaDB driver,
;; NEXT_JDBC_TEST_MYSQL enables these tests at all (else test-mysql is nil):
(def ^:private test-mysql-map
  (merge (if (System/getenv "NEXT_JDBC_TEST_MARIADB")
           {:dbtype "mariadb"}
           {:dbtype "mysql" :disableMariaDbDriver true})
         {:dbname "clojure_test" :useSSL false :allowMultiQueries true
          :user "root" :password (System/getenv "MYSQL_ROOT_PASSWORD")}))
(def ^:private test-mysql
  (when (System/getenv "NEXT_JDBC_TEST_MYSQL") test-mysql-map))
(defn- create-clojure-test
  "Create the clojure_test database in MySQL, retrying (up to 11 attempts,
  3s apart) while the server comes up; rethrows the last failure after
  that. No-op when test-mysql is nil (MySQL testing disabled)."
  []
  (when test-mysql
    (let [mysql (assoc test-mysql :dbname "mysql")]
      (println "Creating clojure-test database in MySQL...")
      (loop [n 0]
        (when (try
                (jdbc/execute-one! mysql ["create database if not exists clojure_test"])
                false ; done
                (catch Throwable t
                  (when (< 10 n) (throw t))
                  (println "\t" (ex-message t) "(will retry)")
                  (Thread/sleep 3000)
                  true))
          (recur (inc n))))
      (println "...done!"))))
;; MS SQL Server specs (native driver and jTDS), both gated on
;; NEXT_JDBC_TEST_MSSQL:
(def ^:private test-mssql-map
  {:dbtype "mssql" :dbname "model" :encrypt false :trustServerCertificate true
   :user "sa" :password (System/getenv "MSSQL_SA_PASSWORD")})
(def ^:private test-mssql
  (when (System/getenv "NEXT_JDBC_TEST_MSSQL") test-mssql-map))
(def ^:private test-jtds-map
  {:dbtype "jtds" :dbname "model"
   :user "sa" :password (System/getenv "MSSQL_SA_PASSWORD")})
(def ^:private test-jtds
  (when (System/getenv "NEXT_JDBC_TEST_MSSQL") test-jtds-map))
;; the full list of databases the with-test-db fixture will loop over:
(def ^:private test-db-specs
  (cond-> [test-derby test-h2-mem test-h2 test-hsql test-sqlite]
    test-postgres (conj test-postgres)
    test-mysql    (conj test-mysql)
    test-mssql    (conj test-mssql test-jtds)))
;; the db-spec currently under test (set by with-test-db for each pass):
(def ^:private test-db-spec (atom nil))
(defn- current-dbtype
  "The :dbtype string of the database currently under test."
  []
  (:dbtype @test-db-spec))

;; predicates identifying the database currently under test; the set-based
;; ones return the matched dbtype string (truthy) rather than true:
(defn derby?       [] (= "derby" (current-dbtype)))
(defn hsqldb?      [] (= "hsqldb" (current-dbtype)))
(defn jtds?        [] (= "jtds" (current-dbtype)))
(defn maria?       [] (= "mariadb" (current-dbtype)))
(defn mssql?       [] (#{"jtds" "mssql"} (current-dbtype)))
(defn mysql?       [] (#{"mariadb" "mysql"} (current-dbtype)))
(defn postgres?    [] (= "embedded-postgres" (current-dbtype)))
(defn sqlite?      [] (= "sqlite" (current-dbtype)))
(defn stored-proc? [] (not (#{"derby" "h2" "h2:mem" "sqlite"} (current-dbtype))))
(defn column
  "Adapt keyword k to the column keyword the current database returns:
  PostgreSQL lower-cases both qualifier and name; MS SQL Server and
  MySQL/MariaDB lower-case just the qualifier; a nil qualifier is
  preserved as nil."
  [k]
  (let [qual      (namespace k)
        lower-ns? (or (postgres?) (mssql?) (mysql?))
        qualifier (when qual
                    (if lower-ns? (str/lower-case qual) qual))
        base      (if (postgres?)
                    (str/lower-case (name k))
                    (name k))]
    (keyword qualifier base)))
(defn default-options
  "Extra execute options needed for the current database; MS SQL Server
  needs a scrollable, read-only result set so that table names come back
  from queries -- everything else needs nothing."
  []
  (if-not (mssql?)
    {}
    {:result-type :scroll-insensitive :concurrency :read-only}))
;; the DataSource currently under test (set by with-test-db for each pass):
(def ^:private test-datasource (atom nil))
(defn db
  "Tests should call this to get the db-spec to use inside a fixture."
  []
  @test-db-spec)
(defn ds
  "Tests should call this to get the DataSource to use inside a fixture."
  []
  @test-datasource)
(defn- do-commands
  "Example from migration docs: this serves as a test for it.
  Runs the given SQL commands as a single statement batch and returns the
  per-command update counts as a vector. A non-Connection connectable is
  first opened as a connection and the call recurses."
  [connectable commands]
  (if-not (instance? java.sql.Connection connectable)
    (with-open [conn (jdbc/get-connection connectable)]
      (do-commands conn commands))
    (with-open [stmt (prep/statement connectable)]
      (doseq [command commands]
        (.addBatch stmt command))
      (vec (.executeBatch stmt)))))
(defn with-test-db
  "Given a test function (or suite), run it in the context of an in-memory
  H2 database set up with a simple fruit table containing four rows of data.
  Tests can reach into here and call ds (above) to get a DataSource for use
  in test functions (that operate inside this fixture).

  Runs the test function once per spec in test-db-specs, resetting the
  test-db-spec and test-datasource atoms each pass, (re)creating the FRUIT
  and BTEST tables, the PostgreSQL LANGUAGE enum / LANG_TEST table, and a
  FRUITP stored procedure where supported, then seeding four fruit rows."
  [t]
  (doseq [db test-db-specs]
    (reset! test-db-spec db)
    ;; embedded PostgreSQL provides its own DataSource (the db-spec above
    ;; is a dummy); everything else goes through jdbc/get-datasource:
    (if (= "embedded-postgres" (:dbtype db))
      (reset! test-datasource
              (.getPostgresDatabase ^EmbeddedPostgres @embedded-pg))
      (reset! test-datasource (jdbc/get-datasource db)))
    (let [fruit (if (mysql?) "fruit" "FRUIT") ; MySQL is case sensitive!
          btest (if (mysql?) "btest" "BTEST")
          ;; auto-increment primary key syntax varies per database:
          auto-inc-pk
          (cond (or (derby?) (hsqldb?))
                (str "GENERATED ALWAYS AS IDENTITY"
                     " (START WITH 1, INCREMENT BY 1)"
                     " PRIMARY KEY")
                (postgres?)
                (str "GENERATED ALWAYS AS IDENTITY"
                     " PRIMARY KEY")
                (mssql?)
                "IDENTITY PRIMARY KEY"
                (sqlite?)
                "PRIMARY KEY AUTOINCREMENT"
                :else
                "AUTO_INCREMENT PRIMARY KEY")]
      (with-open [con (jdbc/get-connection (ds))]
        ;; best-effort cleanup of objects from previous runs:
        (when (stored-proc?)
          (try
            (jdbc/execute-one! con ["DROP PROCEDURE FRUITP"])
            (catch Throwable _)))
        (try
          (do-commands con [(str "DROP TABLE " fruit)])
          (catch Exception _))
        (try
          (do-commands con [(str "DROP TABLE " btest)])
          (catch Exception _))
        (when (postgres?)
          (try
            (do-commands con ["DROP TABLE LANG_TEST"])
            (catch Exception _))
          (try
            (do-commands con ["DROP TYPE LANGUAGE"])
            (catch Exception _))
          (do-commands con ["CREATE TYPE LANGUAGE AS ENUM('en','fr','de')"])
          (do-commands con ["
CREATE TABLE LANG_TEST (
  LANG LANGUAGE NOT NULL
)"]))
        (do-commands con [(str "
CREATE TABLE " fruit " (
  ID INTEGER " auto-inc-pk ",
  NAME VARCHAR(32),
  APPEARANCE VARCHAR(32) DEFAULT NULL,
  COST INT DEFAULT NULL,
  GRADE REAL DEFAULT NULL
)")])
        ;; find a boolean-ish / bit-ish column type pair the current
        ;; database accepts by trying combinations until one works:
        (let [created (atom false)]
          ;; MS SQL Server does not support bool/boolean:
          (doseq [btype ["BOOL" "BOOLEAN" "BIT"]]
            ;; Derby does not support bit:
            (doseq [bitty ["BIT" "SMALLINT"]]
              (try
                (when-not @created
                  (do-commands con [(str "
CREATE TABLE " btest " (
  NAME VARCHAR(32),
  IS_IT " btype ",
  TWIDDLE " bitty "
)")])
                  (reset! created true))
                (catch Throwable _))))
          (when-not @created
            (println (:dbtype db) "failed btest creation")
            #_(throw (ex-info (str (:dbtype db) " has no boolean type?") {}))))
        ;; stored procedure returning two result sets (syntax varies):
        (when (stored-proc?)
          (let [[begin end] (if (postgres?) ["$$" "$$"] ["BEGIN" "END"])]
            (try
              (do-commands con [(str "
CREATE PROCEDURE FRUITP" (cond (hsqldb?) "() READS SQL DATA DYNAMIC RESULT SETS 2 "
                               (mssql?) " AS "
                               (postgres?) "() LANGUAGE SQL AS "
                               :else "() ") "
" begin " " (if (hsqldb?)
              (str "ATOMIC
  DECLARE result1 CURSOR WITH RETURN FOR SELECT * FROM " fruit " WHERE COST < 90;
  DECLARE result2 CURSOR WITH RETURN FOR SELECT * FROM " fruit " WHERE GRADE >= 90.0;
  OPEN result1;
  OPEN result2;")
              (str "
  SELECT * FROM " fruit " WHERE COST < 90;
  SELECT * FROM " fruit " WHERE GRADE >= 90.0;")) "
" end "
")])
              (catch Throwable t
                (println 'procedure (:dbtype db) (ex-message t))))))
        ;; seed the four fixture rows every test in this suite relies on:
        (sql/insert-multi! con :fruit
                           [:name :appearance :cost :grade]
                           [["Apple" "red" 59 nil]
                            ["Banana" "yellow" nil 92.2]
                            ["Peach" nil 139 90.0]
                            ["Orange" "juicy" 89 88.6]]
                           {:return-keys false})
        (t)))))
;; NOTE(review): `create-clojure-test` is defined earlier in this file
;; (outside this view) -- presumably it generates the clojure.test entry
;; point(s) for the database-matrix fixture above; confirm against its
;; definition before relying on this description.
(create-clojure-test)
;; Rich comment block: REPL-only conveniences -- nothing here is evaluated
;; when this file is loaded normally.
(comment
;; this is a convenience to bring next.jdbc's test dependencies
;; into any REPL running Clojure 1.12.0's new add-libs API
;; which allows me to develop and test next.jdbc inside my work's
;; "everything" REPL environment
(require '[clojure.repl.deps :refer [add-libs]]
'[clojure.edn :as edn])
;; pull the :test alias's :extra-deps straight from the published deps.edn:
(def test-deps (-> (slurp "https://raw.githubusercontent.com/seancorfield/next-jdbc/develop/deps.edn")
(edn/read-string)
:aliases
:test
:extra-deps))
(add-libs test-deps)
;; now you can load this file... and then you can load other test
;; files and run their tests as needed... which will leave (ds)
;; set to the embedded PostgreSQL datasource -- reset it with this:
(let [db test-h2-mem #_test-mysql-map]
(reset! test-db-spec db)
(reset! test-datasource (jdbc/get-datasource db))))
next-jdbc-1.3.955/test/next/jdbc/transaction_test.clj 0000664 0000000 0000000 00000001167 14700603111 0022467 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2021 Sean Corfield, all rights reserved
(ns next.jdbc.transaction-test
"Stub test namespace for transaction handling."
(:require [clojure.test :refer [deftest is testing use-fixtures]]
[next.jdbc :as jdbc]
[next.jdbc.specs :as specs]
[next.jdbc.test-fixtures :refer [with-test-db db ds column
default-options
derby? mssql? mysql? postgres?]]
[next.jdbc.transaction :as tx]))
;; Fail fast on reflective interop calls (missing type hints) in this ns.
(set! *warn-on-reflection* true)
;; Run the whole namespace once inside the multi-database fixture.
(use-fixtures :once with-test-db)
;; Enable clojure.spec instrumentation of next.jdbc's public API
;; (via next.jdbc.specs) for all tests in this namespace.
(specs/instrument)
next-jdbc-1.3.955/test/next/jdbc/types_test.clj 0000664 0000000 0000000 00000000736 14700603111 0021307 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2020-2021 Sean Corfield, all rights reserved
(ns next.jdbc.types-test
"Some tests for the type-assist functions."
(:require [clojure.test :refer [deftest is]]
[next.jdbc.types :refer [as-varchar]]))
;; Fail fast on reflective interop calls (missing type hints) in this ns.
(set! *warn-on-reflection* true)
(deftest as-varchar-test
  ;; `as-varchar` returns an invokable wrapper around the value whose
  ;; metadata carries a `next.jdbc.prepare/set-parameter` implementation
  ;; (the assertions below pin exactly that contract).
  (let [wrapped (as-varchar "Hello")
        md      (meta wrapped)
        setter  (md 'next.jdbc.prepare/set-parameter)]
    ;; calling the wrapper yields the original value back:
    (is (= "Hello" (wrapped)))
    ;; the set-parameter hook is present in the metadata, and is callable:
    (is (contains? md 'next.jdbc.prepare/set-parameter))
    (is (fn? setter))))
next-jdbc-1.3.955/test/next/jdbc_test.clj 0000664 0000000 0000000 00000137126 14700603111 0020147 0 ustar 00root root 0000000 0000000 ;; copyright (c) 2019-2021 Sean Corfield, all rights reserved
(ns next.jdbc-test
"Basic tests for the primary API of `next.jdbc`."
(:require [clojure.core.reducers :as r]
[clojure.string :as str]
[clojure.test :refer [deftest is testing use-fixtures]]
[next.jdbc :as jdbc]
[next.jdbc.connection :as c]
[next.jdbc.test-fixtures
:refer [with-test-db db ds column
default-options stored-proc?
derby? hsqldb? jtds? mssql? mysql? postgres? sqlite?]]
[next.jdbc.prepare :as prep]
[next.jdbc.result-set :as rs]
[next.jdbc.specs :as specs]
[next.jdbc.types :as types])
(:import (com.zaxxer.hikari HikariDataSource)
(com.mchange.v2.c3p0 ComboPooledDataSource PooledDataSource)
(java.sql ResultSet ResultSetMetaData)))
;; Fail fast on reflective interop calls (missing type hints) in this ns.
(set! *warn-on-reflection* true)
;; around each test because of the folding tests using 1,000 rows
;; (the fixture re-creates the fruit table, so :each keeps tests isolated)
(use-fixtures :each with-test-db)
;; Enable clojure.spec instrumentation of next.jdbc's public API.
(specs/instrument)
(deftest spec-tests
  ;; Sanity checks that the instrumented (spec-checked) primary API accepts
  ;; both a plain db-spec hash map and a `with-options`-wrapped spec.
  ;; Fix: the original leaked the `java.sql.Connection`s returned by
  ;; `get-connection`; `with-open` guarantees they are closed, and the
  ;; `instance?` assertions make the previously-discarded results explicit.
  (let [db-spec {:dbtype "h2:mem" :dbname "clojure_test"}]
    ;; some sanity checks on instrumented function calls:
    (jdbc/get-datasource db-spec)
    (with-open [con (jdbc/get-connection db-spec)]
      (is (instance? java.sql.Connection con)))
    ;; and again with options:
    (let [db-spec' (jdbc/with-options db-spec {})]
      (jdbc/get-datasource db-spec')
      (with-open [con (jdbc/get-connection db-spec')]
        (is (instance? java.sql.Connection con))))))
;; Exercises the primary next.jdbc API -- plan / execute-one! / execute! /
;; prepare / statement / transact / with-transaction (including rollback,
;; exceptions and save points) -- against the fixture's fruit table.
(deftest basic-tests
;; use ds-opts instead of (ds) anywhere you want default options applied:
(let [ds-opts (jdbc/with-options (ds) (default-options))]
;; plan: reducible result set; reduced short-circuits after the first row.
(testing "plan"
(is (= "Apple"
(reduce (fn [_ row] (reduced (:name row)))
nil
(jdbc/plan
ds-opts
["select * from fruit where appearance = ?" "red"]))))
(is (= "Banana"
(reduce (fn [_ row] (reduced (:no-such-column row "Banana")))
nil
(jdbc/plan
ds-opts
["select * from fruit where appearance = ?" "red"])))))
;; execute-one!: single row (or nil) with various column-naming options.
(testing "execute-one!"
(is (nil? (jdbc/execute-one!
(ds)
["select * from fruit where appearance = ?" "neon-green"])))
(is (= "Apple" ((column :FRUIT/NAME)
(jdbc/execute-one!
ds-opts
["select * from fruit where appearance = ?" "red"]))))
(is (= "red" (:fruit/looks-like
(jdbc/execute-one!
ds-opts
["select appearance as looks_like from fruit where id = ?" 1]
jdbc/snake-kebab-opts))))
(let [ds' (jdbc/with-options ds-opts jdbc/snake-kebab-opts)]
(is (= "red" (:fruit/looks-like
(jdbc/execute-one!
ds'
["select appearance as looks_like from fruit where id = ?" 1])))))
(jdbc/with-transaction+options [ds' (jdbc/with-options ds-opts jdbc/snake-kebab-opts)]
(is (= (merge (default-options) jdbc/snake-kebab-opts)
(:options ds')))
(is (= "red" (:fruit/looks-like
(jdbc/execute-one!
ds'
["select appearance as looks_like from fruit where id = ?" 1])))))
(is (= "red" (:looks-like
(jdbc/execute-one!
ds-opts
["select appearance as looks_like from fruit where id = ?" 1]
jdbc/unqualified-snake-kebab-opts)))))
;; execute!: full result sets as vectors of maps or arrays.
(testing "execute!"
(let [rs (jdbc/execute!
ds-opts
["select * from fruit where appearance = ?" "neon-green"])]
(is (vector? rs))
(is (= [] rs)))
(let [rs (jdbc/execute!
ds-opts
["select * from fruit where appearance = ?" "red"])]
(is (= 1 (count rs)))
(is (= 1 ((column :FRUIT/ID) (first rs)))))
(let [rs (jdbc/execute!
ds-opts
["select * from fruit order by id"]
{:builder-fn rs/as-maps})]
(is (every? map? rs))
(is (every? meta rs))
(is (= 4 (count rs)))
(is (= 1 ((column :FRUIT/ID) (first rs))))
(is (= 4 ((column :FRUIT/ID) (last rs)))))
(let [rs (jdbc/execute!
ds-opts
["select * from fruit order by id"]
{:builder-fn rs/as-arrays})]
(is (every? vector? rs))
(is (= 5 (count rs)))
(is (every? #(= 5 (count %)) rs))
;; columns come first
(is (every? qualified-keyword? (first rs)))
;; :FRUIT/ID should be first column
(is (= (column :FRUIT/ID) (ffirst rs)))
;; and all its corresponding values should be ints
(is (every? int? (map first (rest rs))))
(is (every? string? (map second (rest rs))))))
(testing "execute! with adapter"
(let [rs (jdbc/execute! ; test again, with adapter and lower columns
ds-opts
["select * from fruit order by id"]
{:builder-fn (rs/as-arrays-adapter
rs/as-lower-arrays
(fn [^ResultSet rs _ ^Integer i]
(.getObject rs i)))})]
(is (every? vector? rs))
(is (= 5 (count rs)))
(is (every? #(= 5 (count %)) rs))
;; columns come first
(is (every? qualified-keyword? (first rs)))
;; :fruit/id should be first column
(is (= :fruit/id (ffirst rs)))
;; and all its corresponding values should be ints
(is (every? int? (map first (rest rs))))
(is (every? string? (map second (rest rs))))))
(testing "execute! with unqualified"
(let [rs (jdbc/execute!
(ds)
["select * from fruit order by id"]
{:builder-fn rs/as-unqualified-maps})]
(is (every? map? rs))
(is (every? meta rs))
(is (= 4 (count rs)))
(is (= 1 ((column :ID) (first rs))))
(is (= 4 ((column :ID) (last rs)))))
(let [rs (jdbc/execute!
ds-opts
["select * from fruit order by id"]
{:builder-fn rs/as-unqualified-arrays})]
(is (every? vector? rs))
(is (= 5 (count rs)))
(is (every? #(= 5 (count %)) rs))
;; columns come first
(is (every? simple-keyword? (first rs)))
;; :ID should be first column
(is (= (column :ID) (ffirst rs)))
;; and all its corresponding values should be ints
(is (every? int? (map first (rest rs))))
(is (every? string? (map second (rest rs))))))
;; row-limiting via next.jdbc's :max-rows and raw JDBC Statement :maxRows.
(testing "execute! with :max-rows / :maxRows"
(let [rs (jdbc/execute!
ds-opts
["select * from fruit order by id"]
{:max-rows 2})]
(is (every? map? rs))
(is (every? meta rs))
(is (= 2 (count rs)))
(is (= 1 ((column :FRUIT/ID) (first rs))))
(is (= 2 ((column :FRUIT/ID) (last rs)))))
(let [rs (jdbc/execute!
ds-opts
["select * from fruit order by id"]
{:statement {:maxRows 2}})]
(is (every? map? rs))
(is (every? meta rs))
(is (= 2 (count rs)))
(is (= 1 ((column :FRUIT/ID) (first rs))))
(is (= 2 ((column :FRUIT/ID) (last rs)))))))
;; prepare: explicit PreparedStatement lifecycle with with-open.
(testing "prepare"
;; default options do not flow over get-connection
(let [rs (with-open [con (jdbc/get-connection (ds))
ps (jdbc/prepare
con
["select * from fruit order by id"]
(default-options))]
(jdbc/execute! ps))]
(is (every? map? rs))
(is (every? meta rs))
(is (= 4 (count rs)))
(is (= 1 ((column :FRUIT/ID) (first rs))))
(is (= 4 ((column :FRUIT/ID) (last rs)))))
;; default options do not flow over get-connection
(let [rs (with-open [con (jdbc/get-connection (ds))
ps (jdbc/prepare
con
["select * from fruit where id = ?"]
(default-options))]
(jdbc/execute! (prep/set-parameters ps [4]) nil {}))]
(is (every? map? rs))
(is (every? meta rs))
(is (= 1 (count rs)))
(is (= 4 ((column :FRUIT/ID) (first rs))))))
;; statement: raw java.sql.Statement execution.
(testing "statement"
;; default options do not flow over get-connection
(let [rs (with-open [con (jdbc/get-connection (ds))]
(jdbc/execute! (prep/statement con (default-options))
["select * from fruit order by id"]))]
(is (every? map? rs))
(is (every? meta rs))
(is (= 4 (count rs)))
(is (= 1 ((column :FRUIT/ID) (first rs))))
(is (= 4 ((column :FRUIT/ID) (last rs)))))
;; default options do not flow over get-connection
(let [rs (with-open [con (jdbc/get-connection (ds))]
(jdbc/execute! (prep/statement con (default-options))
["select * from fruit where id = 4"]))]
(is (every? map? rs))
(is (every? meta rs))
(is (= 1 (count rs)))
(is (= 4 ((column :FRUIT/ID) (first rs))))))
;; transact with :rollback-only: the insert reports success but is undone,
;; so the table still has only the original 4 rows afterwards.
(testing "transact"
(is (= [{:next.jdbc/update-count 1}]
(jdbc/transact (ds)
(fn [t] (jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"]))
{:rollback-only true})))
(is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"])))))
(testing "with-transaction rollback-only"
(is (not (jdbc/active-tx?)) "should not be in a transaction")
(is (= [{:next.jdbc/update-count 1}]
(jdbc/with-transaction [t (ds) {:rollback-only true}]
(is (jdbc/active-tx?) "should be in a transaction")
(jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"]))))
(is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"]))))
(is (not (jdbc/active-tx?)) "should not be in a transaction")
;; also verify auto-commit is restored on an existing Connection:
(with-open [con (jdbc/get-connection (ds))]
(let [ac (.getAutoCommit con)]
(is (= [{:next.jdbc/update-count 1}]
(jdbc/with-transaction [t con {:rollback-only true}]
(is (jdbc/active-tx?) "should be in a transaction")
(jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"]))))
(is (= 4 (count (jdbc/execute! con ["select * from fruit"]))))
(is (= ac (.getAutoCommit con))))))
;; throwing out of with-transaction must roll the insert back.
(testing "with-transaction exception"
(is (thrown? Throwable
(jdbc/with-transaction [t (ds)]
(jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"])
(is (jdbc/active-tx?) "should be in a transaction")
(throw (ex-info "abort" {})))))
(is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"]))))
(is (not (jdbc/active-tx?)) "should not be in a transaction")
(with-open [con (jdbc/get-connection (ds))]
(let [ac (.getAutoCommit con)]
(is (thrown? Throwable
(jdbc/with-transaction [t con]
(jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"])
(is (jdbc/active-tx?) "should be in a transaction")
(throw (ex-info "abort" {})))))
(is (= 4 (count (jdbc/execute! con ["select * from fruit"]))))
(is (= ac (.getAutoCommit con))))))
;; explicit .rollback on the transacted Connection also undoes the insert.
(testing "with-transaction call rollback"
(is (= [{:next.jdbc/update-count 1}]
(jdbc/with-transaction [t (ds)]
(let [result (jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"])]
(.rollback t)
;; still in a next.jdbc TX even tho' we rolled back!
(is (jdbc/active-tx?) "should be in a transaction")
result))))
(is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"]))))
(is (not (jdbc/active-tx?)) "should not be in a transaction")
(with-open [con (jdbc/get-connection (ds))]
(let [ac (.getAutoCommit con)]
(is (= [{:next.jdbc/update-count 1}]
(jdbc/with-transaction [t con]
(let [result (jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"])]
(.rollback t)
result))))
(is (= 4 (count (jdbc/execute! con ["select * from fruit"]))))
(is (= ac (.getAutoCommit con))))))
;; JDBC save points: rollback to a save point undoes the insert too.
(testing "with-transaction with unnamed save point"
(is (= [{:next.jdbc/update-count 1}]
(jdbc/with-transaction [t (ds)]
(let [save-point (.setSavepoint t)
result (jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"])]
(.rollback t save-point)
;; still in a next.jdbc TX even tho' we rolled back to a save point!
(is (jdbc/active-tx?) "should be in a transaction")
result))))
(is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"]))))
(is (not (jdbc/active-tx?)) "should not be in a transaction")
(with-open [con (jdbc/get-connection (ds))]
(let [ac (.getAutoCommit con)]
(is (= [{:next.jdbc/update-count 1}]
(jdbc/with-transaction [t con]
(let [save-point (.setSavepoint t)
result (jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"])]
(.rollback t save-point)
result))))
(is (= 4 (count (jdbc/execute! con ["select * from fruit"]))))
(is (= ac (.getAutoCommit con))))))
(testing "with-transaction with named save point"
(is (= [{:next.jdbc/update-count 1}]
(jdbc/with-transaction [t (ds)]
(let [save-point (.setSavepoint t (name (gensym)))
result (jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"])]
(.rollback t save-point)
result))))
(is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"]))))
(with-open [con (jdbc/get-connection (ds))]
(let [ac (.getAutoCommit con)]
(is (= [{:next.jdbc/update-count 1}]
(jdbc/with-transaction [t con]
(let [save-point (.setSavepoint t (name (gensym)))
result (jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"])]
(.rollback t save-point)
result))))
(is (= 4 (count (jdbc/execute! con ["select * from fruit"]))))
(is (= ac (.getAutoCommit con)))))))
;; Regression test for issue #146: save points must work through pooled
;; connections (HikariCP and c3p0), not just raw driver connections.
(deftest issue-146
;; since we use an embedded PostgreSQL data source, we skip this:
(when-not (or (postgres?)
;; and now we skip MS SQL because we can't use the db-spec
;; we'd need to build the jdbcUrl with encryption turned off:
(and (mssql?) (not (jtds?))))
(testing "Hikari and SavePoints"
(with-open [^HikariDataSource ds (c/->pool HikariDataSource
(let [db (db)]
(cond-> db
;; jTDS does not support isValid():
(jtds?)
(assoc :connectionTestQuery "SELECT 1")
;; HikariCP needs username, not user:
(contains? db :user)
(assoc :username (:user db)))))]
(testing "with-transaction with unnamed save point"
(is (= [{:next.jdbc/update-count 1}]
(jdbc/with-transaction [t ds]
(let [save-point (.setSavepoint t)
result (jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"])]
(.rollback t save-point)
result))))
(is (= 4 (count (jdbc/execute! ds ["select * from fruit"]))))
(with-open [con (jdbc/get-connection ds)]
(let [ac (.getAutoCommit con)]
(is (= [{:next.jdbc/update-count 1}]
(jdbc/with-transaction [t con]
(let [save-point (.setSavepoint t)
result (jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"])]
(.rollback t save-point)
result))))
(is (= 4 (count (jdbc/execute! con ["select * from fruit"]))))
(is (= ac (.getAutoCommit con))))))
(testing "with-transaction with named save point"
(is (= [{:next.jdbc/update-count 1}]
(jdbc/with-transaction [t ds]
(let [save-point (.setSavepoint t (name (gensym)))
result (jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"])]
(.rollback t save-point)
result))))
(is (= 4 (count (jdbc/execute! ds ["select * from fruit"]))))
(with-open [con (jdbc/get-connection ds)]
(let [ac (.getAutoCommit con)]
(is (= [{:next.jdbc/update-count 1}]
(jdbc/with-transaction [t con]
(let [save-point (.setSavepoint t (name (gensym)))
result (jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"])]
(.rollback t save-point)
result))))
(is (= 4 (count (jdbc/execute! con ["select * from fruit"]))))
(is (= ac (.getAutoCommit con))))))))
;; same save-point scenarios again, through a c3p0 pool:
(testing "c3p0 and SavePoints"
(with-open [^PooledDataSource ds (c/->pool ComboPooledDataSource (db))]
(testing "with-transaction with unnamed save point"
(is (= [{:next.jdbc/update-count 1}]
(jdbc/with-transaction [t ds]
(let [save-point (.setSavepoint t)
result (jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"])]
(.rollback t save-point)
result))))
(is (= 4 (count (jdbc/execute! ds ["select * from fruit"]))))
(with-open [con (jdbc/get-connection ds)]
(let [ac (.getAutoCommit con)]
(is (= [{:next.jdbc/update-count 1}]
(jdbc/with-transaction [t con]
(let [save-point (.setSavepoint t)
result (jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"])]
(.rollback t save-point)
result))))
(is (= 4 (count (jdbc/execute! con ["select * from fruit"]))))
(is (= ac (.getAutoCommit con))))))
(testing "with-transaction with named save point"
(is (= [{:next.jdbc/update-count 1}]
(jdbc/with-transaction [t ds]
(let [save-point (.setSavepoint t (name (gensym)))
result (jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"])]
(.rollback t save-point)
result))))
(is (= 4 (count (jdbc/execute! ds ["select * from fruit"]))))
(with-open [con (jdbc/get-connection ds)]
(let [ac (.getAutoCommit con)]
(is (= [{:next.jdbc/update-count 1}]
(jdbc/with-transaction [t con]
(let [save-point (.setSavepoint t (name (gensym)))
result (jdbc/execute! t ["
INSERT INTO fruit (name, appearance, cost, grade)
VALUES ('Pear', 'green', 49, 47)
"])]
(.rollback t save-point)
result))))
(is (= 4 (count (jdbc/execute! con ["select * from fruit"]))))
(is (= ac (.getAutoCommit con))))))))))
;; NOTE: the #_ reader macro below DISABLES this entire deftest -- it is
;; kept as an exploratory probe (issue #226) of the SQLException details
;; (error code / SQL state) each database reports on duplicate-key inserts.
#_
(deftest duplicate-insert-test
;; this is primarily a look at exception types/information for #226
(try
(jdbc/execute! (ds) ["
INSERT INTO fruit (id, name, appearance, cost, grade)
VALUES (1234, '1234', '1234', 1234, 1234)
"])
(try
(jdbc/execute! (ds) ["
INSERT INTO fruit (id, name, appearance, cost, grade)
VALUES (1234, '1234', '1234', 1234, 1234)
"])
(println (:dbtype (db)) "allowed duplicate insert")
(catch java.sql.SQLException t
(println (:dbtype (db)) "duplicate insert threw" (type t)
"error" (.getErrorCode t) "state" (.getSQLState t)
"\n\t" (ex-message t))))
(catch java.sql.SQLException t
(println (:dbtype (db)) "will not allow specific ID" (type t)
"error" (.getErrorCode t) "state" (.getSQLState t)
"\n\t" (ex-message t)))))
;; Verifies how each database round-trips boolean-ish column types
;; (BOOL/BOOLEAN/BIT/SMALLINT in the btest table created by the fixture),
;; including per-database quirks and builder-adapter / plan-based coercion.
(deftest bool-tests
(doseq [[n b] [["zero" 0] ["one" 1] ["false" false] ["true" true]]
:let [v-bit (if (number? b) b (if b 1 0))
v-bool (if (number? b) (pos? b) b)]]
(jdbc/execute-one!
(ds)
["insert into btest (name,is_it,twiddle) values (?,?,?)"
n
(if (postgres?)
(types/as-boolean b)
b) ; 0, 1, false, true are all acceptable
(cond (hsqldb?)
v-bool ; hsqldb requires a boolean here
(postgres?)
(types/as-other v-bit) ; really postgres??
:else
v-bit)]))
;; default read: SQLite yields numbers, Derby numbers for the BIT-ish
;; column; everything else comes back as real booleans.
(let [data (jdbc/execute! (ds) ["select * from btest"]
(default-options))]
(if (sqlite?)
(is (every? number? (map (column :BTEST/IS_IT) data)))
(is (every? boolean? (map (column :BTEST/IS_IT) data))))
(if (or (sqlite?) (derby?))
(is (every? number? (map (column :BTEST/TWIDDLE) data)))
(is (every? boolean? (map (column :BTEST/TWIDDLE) data)))))
;; a builder adapter can coerce SQLite's BIT/BOOL columns to booleans
;; at read time via ResultSet.getBoolean:
(let [data (jdbc/execute! (ds) ["select * from btest"]
(cond-> (default-options)
(sqlite?)
(assoc :builder-fn
(rs/builder-adapter
rs/as-maps
(fn [builder ^ResultSet rs ^Integer i]
(let [rsm ^ResultSetMetaData (:rsmeta builder)]
(rs/read-column-by-index
;; we only use bit and bool for
;; sqlite (not boolean)
(if (#{"BIT" "BOOL"} (.getColumnTypeName rsm i))
(.getBoolean rs i)
(.getObject rs i))
rsm
i)))))))]
(is (every? boolean? (map (column :BTEST/IS_IT) data)))
(if (derby?)
(is (every? number? (map (column :BTEST/TWIDDLE) data)))
(is (every? boolean? (map (column :BTEST/TWIDDLE) data)))))
;; plan rows can be coerced manually (pos?) during the reduction:
(let [data (reduce (fn [acc row]
(conj acc (cond-> (select-keys row [:is_it :twiddle])
(sqlite?)
(update :is_it pos?)
(or (sqlite?) (derby?))
(update :twiddle pos?))))
[]
(jdbc/plan (ds) ["select * from btest"]))]
(is (every? boolean? (map :is_it data)))
(is (every? boolean? (map :twiddle data)))))
;; execute-batch! against a PreparedStatement: default, small and large
;; :batch-size, :large (long) update counts, and :return-generated-keys.
;; Each sub-test runs in a :rollback-only transaction, so the table is
;; back to 4 rows afterwards (the 13 is 4 original + 9 inserted, counted
;; inside the transaction before rollback).
(deftest execute-batch-tests
(testing "simple batch insert"
(is (= [1 1 1 1 1 1 1 1 1 13]
(jdbc/with-transaction [t (ds) {:rollback-only true}]
(with-open [ps (jdbc/prepare t ["
INSERT INTO fruit (name, appearance) VALUES (?,?)
"])]
(let [result (jdbc/execute-batch! ps [["fruit1" "one"]
["fruit2" "two"]
["fruit3" "three"]
["fruit4" "four"]
["fruit5" "five"]
["fruit6" "six"]
["fruit7" "seven"]
["fruit8" "eight"]
["fruit9" "nine"]])]
(conj result (count (jdbc/execute! t ["select * from fruit"]))))))))
(is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"])))))
(testing "small batch insert"
(is (= [1 1 1 1 1 1 1 1 1 13]
(jdbc/with-transaction [t (ds) {:rollback-only true}]
(with-open [ps (jdbc/prepare t ["
INSERT INTO fruit (name, appearance) VALUES (?,?)
"])]
(let [result (jdbc/execute-batch! ps [["fruit1" "one"]
["fruit2" "two"]
["fruit3" "three"]
["fruit4" "four"]
["fruit5" "five"]
["fruit6" "six"]
["fruit7" "seven"]
["fruit8" "eight"]
["fruit9" "nine"]]
{:batch-size 3})]
(conj result (count (jdbc/execute! t ["select * from fruit"]))))))))
(is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"])))))
(testing "big batch insert"
(is (= [1 1 1 1 1 1 1 1 1 13]
(jdbc/with-transaction [t (ds) {:rollback-only true}]
(with-open [ps (jdbc/prepare t ["
INSERT INTO fruit (name, appearance) VALUES (?,?)
"])]
(let [result (jdbc/execute-batch! ps [["fruit1" "one"]
["fruit2" "two"]
["fruit3" "three"]
["fruit4" "four"]
["fruit5" "five"]
["fruit6" "six"]
["fruit7" "seven"]
["fruit8" "eight"]
["fruit9" "nine"]]
{:batch-size 8})]
(conj result (count (jdbc/execute! t ["select * from fruit"]))))))))
(is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"])))))
(testing "large batch insert"
(when-not (or (jtds?) (sqlite?))
(is (= [1 1 1 1 1 1 1 1 1 13]
(jdbc/with-transaction [t (ds) {:rollback-only true}]
(with-open [ps (jdbc/prepare t ["
INSERT INTO fruit (name, appearance) VALUES (?,?)
"])]
(let [result (jdbc/execute-batch! ps [["fruit1" "one"]
["fruit2" "two"]
["fruit3" "three"]
["fruit4" "four"]
["fruit5" "five"]
["fruit6" "six"]
["fruit7" "seven"]
["fruit8" "eight"]
["fruit9" "nine"]]
{:batch-size 4
:large true})]
(conj result (count (jdbc/execute! t ["select * from fruit"]))))))))
(is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"]))))))
(testing "return generated keys"
(when-not (or (mssql?) (sqlite?))
(let [results
(jdbc/with-transaction [t (ds) {:rollback-only true}]
(with-open [ps (jdbc/prepare t ["
INSERT INTO fruit (name, appearance) VALUES (?,?)
"]
{:return-keys true})]
(let [result (jdbc/execute-batch! ps [["fruit1" "one"]
["fruit2" "two"]
["fruit3" "three"]
["fruit4" "four"]
["fruit5" "five"]
["fruit6" "six"]
["fruit7" "seven"]
["fruit8" "eight"]
["fruit9" "nine"]]
{:batch-size 4
:return-generated-keys true})]
(conj result (count (jdbc/execute! t ["select * from fruit"]))))))]
(is (= 13 (last results)))
(is (every? map? (butlast results)))
;; Derby and SQLite only return one generated key per batch so there
;; are only three keys, plus the overall count here:
(is (< 3 (count results))))
(is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"])))))))
;; execute-batch! via a connectable (datasource / with-options /
;; with-logging wrappers) rather than a PreparedStatement. No transaction
;; here, so each sub-test cleans up with a DELETE in `finally`.
(deftest execute-batch-connectable-tests
(testing "simple batch insert"
(is (= [1 1 1 1 1 1 1 1 1 13]
(try
(let [result (jdbc/execute-batch! (ds)
"INSERT INTO fruit (name, appearance) VALUES (?,?)"
[["fruit1" "one"]
["fruit2" "two"]
["fruit3" "three"]
["fruit4" "four"]
["fruit5" "five"]
["fruit6" "six"]
["fruit7" "seven"]
["fruit8" "eight"]
["fruit9" "nine"]]
{})]
(conj result (count (jdbc/execute! (ds) ["select * from fruit"]))))
(finally
(jdbc/execute-one! (ds) ["delete from fruit where id > 4"])))))
(is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"])))))
(testing "batch with-options"
(is (= [1 1 1 1 1 1 1 1 1 13]
(try
(let [result (jdbc/execute-batch! (jdbc/with-options (ds) {})
"INSERT INTO fruit (name, appearance) VALUES (?,?)"
[["fruit1" "one"]
["fruit2" "two"]
["fruit3" "three"]
["fruit4" "four"]
["fruit5" "five"]
["fruit6" "six"]
["fruit7" "seven"]
["fruit8" "eight"]
["fruit9" "nine"]]
{})]
(conj result (count (jdbc/execute! (ds) ["select * from fruit"]))))
(finally
(jdbc/execute-one! (ds) ["delete from fruit where id > 4"])))))
(is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"])))))
(testing "batch with-logging"
(is (= [1 1 1 1 1 1 1 1 1 13]
(try
(let [result (jdbc/execute-batch! (jdbc/with-logging (ds) println println)
"INSERT INTO fruit (name, appearance) VALUES (?,?)"
[["fruit1" "one"]
["fruit2" "two"]
["fruit3" "three"]
["fruit4" "four"]
["fruit5" "five"]
["fruit6" "six"]
["fruit7" "seven"]
["fruit8" "eight"]
["fruit9" "nine"]]
{})]
(conj result (count (jdbc/execute! (ds) ["select * from fruit"]))))
(finally
(jdbc/execute-one! (ds) ["delete from fruit where id > 4"])))))
(is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"])))))
(testing "small batch insert"
(is (= [1 1 1 1 1 1 1 1 1 13]
(try
(let [result (jdbc/execute-batch! (ds)
"INSERT INTO fruit (name, appearance) VALUES (?,?)"
[["fruit1" "one"]
["fruit2" "two"]
["fruit3" "three"]
["fruit4" "four"]
["fruit5" "five"]
["fruit6" "six"]
["fruit7" "seven"]
["fruit8" "eight"]
["fruit9" "nine"]]
{:batch-size 3})]
(conj result (count (jdbc/execute! (ds) ["select * from fruit"]))))
(finally
(jdbc/execute-one! (ds) ["delete from fruit where id > 4"])))))
(is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"])))))
(testing "big batch insert"
(is (= [1 1 1 1 1 1 1 1 1 13]
(try
(let [result (jdbc/execute-batch! (ds)
"INSERT INTO fruit (name, appearance) VALUES (?,?)"
[["fruit1" "one"]
["fruit2" "two"]
["fruit3" "three"]
["fruit4" "four"]
["fruit5" "five"]
["fruit6" "six"]
["fruit7" "seven"]
["fruit8" "eight"]
["fruit9" "nine"]]
{:batch-size 8})]
(conj result (count (jdbc/execute! (ds) ["select * from fruit"]))))
(finally
(jdbc/execute-one! (ds) ["delete from fruit where id > 4"])))))
(is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"])))))
(testing "large batch insert"
(when-not (or (jtds?) (sqlite?))
(is (= [1 1 1 1 1 1 1 1 1 13]
(try
(let [result (jdbc/execute-batch! (ds)
"INSERT INTO fruit (name, appearance) VALUES (?,?)"
[["fruit1" "one"]
["fruit2" "two"]
["fruit3" "three"]
["fruit4" "four"]
["fruit5" "five"]
["fruit6" "six"]
["fruit7" "seven"]
["fruit8" "eight"]
["fruit9" "nine"]]
{:batch-size 4
:large true})]
(conj result (count (jdbc/execute! (ds) ["select * from fruit"]))))
(finally
(jdbc/execute-one! (ds) ["delete from fruit where id > 4"])))))
(is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"]))))))
(testing "return generated keys"
(when-not (or (mssql?) (sqlite?))
(let [results
(try
(let [result (jdbc/execute-batch! (ds)
"INSERT INTO fruit (name, appearance) VALUES (?,?)"
[["fruit1" "one"]
["fruit2" "two"]
["fruit3" "three"]
["fruit4" "four"]
["fruit5" "five"]
["fruit6" "six"]
["fruit7" "seven"]
["fruit8" "eight"]
["fruit9" "nine"]]
;; note: we need both :return-keys true for creating
;; the PreparedStatement and :return-generated-keys
;; true to control the way batch execution happens:
{:batch-size 4 :return-keys true
:return-generated-keys true})]
(conj result (count (jdbc/execute! (ds) ["select * from fruit"]))))
(finally
(jdbc/execute-one! (ds) ["delete from fruit where id > 4"])))]
(is (= 13 (last results)))
(is (every? map? (butlast results)))
;; Derby and SQLite only return one generated key per batch so there
;; are only three keys, plus the overall count here:
(is (< 3 (count results))))
(is (= 4 (count (jdbc/execute! (ds) ["select * from fruit"])))))))
;; Parallel-fold (clojure.core.reducers) over plan's reducible result set,
;; sourced from a Connection, DataSource, PreparedStatement and Statement.
;; The fruit table is first repopulated with 1,000 known rows.
(deftest folding-test
(jdbc/execute-one! (ds) ["delete from fruit"])
(with-open [con (jdbc/get-connection (ds))
ps (jdbc/prepare con ["insert into fruit(name) values (?)"])]
(jdbc/execute-batch! ps (mapv #(vector (str "Fruit-" %)) (range 1 1001))))
(testing "foldable result set"
(testing "from a Connection"
(let [result
(with-open [con (jdbc/get-connection (ds))]
(r/foldcat
(r/map (column :FRUIT/NAME)
(jdbc/plan con ["select * from fruit order by id"]
(default-options)))))]
(is (= 1000 (count result)))
(is (= "Fruit-1" (first result)))
(is (= "Fruit-1000" (last result)))))
(testing "from a DataSource"
;; exercise a spread of fold partition sizes around the row count:
(doseq [n [2 3 4 5 100 300 500 700 900 1000 1100]]
(testing (str "folding with n = " n)
(let [result
(try
(r/fold n r/cat r/append!
(r/map (column :FRUIT/NAME)
(jdbc/plan (ds) ["select * from fruit order by id"]
(default-options))))
;; fold may be rejected if the thread pool is saturated;
;; treat that as an empty (failed) result:
(catch java.util.concurrent.RejectedExecutionException _
[]))]
(is (= 1000 (count result)))
(is (= "Fruit-1" (first result)))
(is (= "Fruit-1000" (last result)))))))
(testing "from a PreparedStatement"
(let [result
(with-open [con (jdbc/get-connection (ds))
stmt (jdbc/prepare con
["select * from fruit order by id"]
(default-options))]
(r/foldcat
(r/map (column :FRUIT/NAME)
(jdbc/plan stmt nil (default-options)))))]
(is (= 1000 (count result)))
(is (= "Fruit-1" (first result)))
(is (= "Fruit-1000" (last result)))))
(testing "from a Statement"
(let [result
(with-open [con (jdbc/get-connection (ds))
stmt (prep/statement con (default-options))]
(r/foldcat
(r/map (column :FRUIT/NAME)
(jdbc/plan stmt ["select * from fruit order by id"]
(default-options)))))]
(is (= 1000 (count result)))
(is (= "Fruit-1" (first result)))
(is (= "Fruit-1000" (last result)))))))
;; Checks that a db-spec can be split into a :jdbcUrl + extra properties
;; and still yield a working DataSource, with per-database expectations
;; about which extra properties survive the split.
(deftest connection-tests
(testing "datasource via jdbcUrl"
(when-not (postgres?)
;; #' reaches the (var-quoted, non-public) spec->url+etc helper in
;; next.jdbc.connection -- it returns [jdbc-url remaining-props]:
(let [[url etc] (#'c/spec->url+etc (db))
ds (jdbc/get-datasource (assoc etc :jdbcUrl url))]
(cond (derby?) (is (= {:create true} etc))
(mssql?) (is (= (cond-> #{:user :password}
(not (jtds?))
(conj :encrypt :trustServerCertificate))
(set (keys etc))))
(mysql?) (is (= #{:user :password :useSSL :allowMultiQueries}
(disj (set (keys etc)) :disableMariaDbDriver)))
:else (is (= {} etc)))
(is (instance? javax.sql.DataSource ds))
;; the printed datasource should embed the expected jdbc: URL prefix:
(is (str/index-of (pr-str ds) (str "jdbc:"
(cond (jtds?)
"jtds:sqlserver"
(mssql?)
"sqlserver"
:else
(:dbtype (db))))))
;; checks get-datasource on a DataSource is identity
(is (identical? ds (jdbc/get-datasource ds)))
(with-open [con (jdbc/get-connection ds {})]
(is (instance? java.sql.Connection con)))))))
;; :multi-rs support: scripts and stored procedures that produce multiple
;; result sets, with per-database expectations about how many come back.
(deftest multi-rs
(when (mssql?)
(testing "script with multiple result sets"
(let [multi-rs
(jdbc/execute! (ds)
[(str "begin"
" select * from fruit;"
" select * from fruit where id < 4;"
" end")]
{:multi-rs true})]
(is (= 2 (count multi-rs)))
(is (= 4 (count (first multi-rs))))
(is (= 3 (count (second multi-rs)))))))
(when (mysql?)
(testing "script with multiple result sets"
(let [multi-rs
(jdbc/execute! (ds)
[(str "select * from fruit;"
" select * from fruit where id < 4")]
{:multi-rs true})]
(is (= 2 (count multi-rs)))
(is (= 4 (count (first multi-rs))))
(is (= 3 (count (second multi-rs)))))))
;; FRUITP is created by the test fixture for databases where
;; (stored-proc?) is true; invocation syntax differs on MS SQL:
(when (stored-proc?)
(testing "stored proc; multiple result sets"
(try
(let [multi-rs
(jdbc/execute! (ds)
[(if (mssql?) "EXEC FRUITP" "CALL FRUITP()")]
{:multi-rs true})
zero-updates [{:next.jdbc/update-count 0}]]
(cond (postgres?) ; does not support multiple result sets yet
;; 4.7.3 (and earlier?) returned the fake zero-updates
;; 4.7.4 returns -1 for update count and an empty result set
(is (= 0 (count multi-rs)))
(hsqldb?)
(do
(is (= 3 (count multi-rs)))
(is (= zero-updates (first multi-rs))))
(mysql?)
(do
(is (= 3 (count multi-rs)))
(is (= zero-updates (last multi-rs))))
:else
(is (= 2 (count multi-rs)))))
;; best-effort: just report drivers whose proc support misbehaves
(catch Throwable t
(println 'call-proc (:dbtype (db)) (ex-message t) (some-> t (ex-cause) (ex-message))))))))
(deftest plan-misuse
  ;; Printing or realizing an unreduced plan should surface a helpful
  ;; "misuse" message instead of leaking a live ResultSet.
  (is (re-find #"missing reduction"
               (pr-str (jdbc/plan (ds) ["select * from fruit"]))))
  (is (re-find #"missing `map` or `reduce`"
               (pr-str (into [] (jdbc/plan (ds) ["select * from fruit"])))))
  ;; drivers differ here: most close the ResultSet before pr-str runs
  ;; (which tries to print each row), so either the misuse message or
  ;; actual row data may be produced
  (let [printed (pr-str (into [] (take 3) (jdbc/plan (ds) ["select * from fruit"]
                                                     (default-options))))]
    (is (or (re-find #"missing `map` or `reduce`" printed)
            (re-find #"(?i)^\[#:fruit\{.*:id.*\}\]$" printed))))
  ;; mapping str/pr-str over the plan realizes each row eagerly, so
  ;; every element should print as a qualified fruit map:
  (is (every? #(re-find #"(?i)^#:fruit\{.*:id.*\}$" %)
              (into [] (map str) (jdbc/plan (ds) ["select * from fruit"]
                                            (default-options)))))
  (is (every? #(re-find #"(?i)^#:fruit\{.*:id.*\}$" %)
              (into [] (map pr-str) (jdbc/plan (ds) ["select * from fruit"]
                                               (default-options)))))
  ;; lazily taking from a plan without reducing it is an error:
  (is (thrown? IllegalArgumentException
               (doall (take 3 (jdbc/plan (ds) ["select * from fruit"]))))))
(deftest issue-204
  ;; on-connection should accept every connectable flavor and yield a
  ;; usable connection in each case.
  (testing "against a Connection"
    (is (seq (with-open [raw-con (jdbc/get-connection (ds))]
               (jdbc/on-connection
                [conn raw-con]
                (jdbc/execute! conn ["select * from fruit"]))))))
  (testing "against a wrapped Connection"
    (is (seq (with-open [raw-con (jdbc/get-connection (ds))]
               (jdbc/on-connection
                [conn (jdbc/with-options raw-con {})]
                (jdbc/execute! conn ["select * from fruit"]))))))
  (testing "against a wrapped Datasource"
    (is (seq (jdbc/on-connection
              [conn (jdbc/with-options (ds) {})]
              (jdbc/execute! conn ["select * from fruit"])))))
  (testing "against a Datasource"
    (is (seq (jdbc/on-connection
              [conn (ds)]
              (jdbc/execute! conn ["select * from fruit"]))))))
(deftest issue-256
  ;; on-connection+options should preserve any options wrapper: raw
  ;; connections stay raw, wrapped connectables yield a wrapped
  ;; connection carrying the original options.
  (testing "against a Connection"
    (is (seq (with-open [raw-con (jdbc/get-connection (ds))]
               (jdbc/on-connection+options
                [c raw-con] ; raw connection stays raw
                (is (instance? java.sql.Connection c))
                (jdbc/execute! c ["select * from fruit"]))))))
  (testing "against a wrapped Connection"
    (is (seq (with-open [raw-con (jdbc/get-connection (ds))]
               (jdbc/on-connection+options
                [c (jdbc/with-options raw-con {:test-option 42})]
                ;; expect the same wrapped connection back:
                (is (instance? java.sql.Connection (:connectable c)))
                (is (= {:test-option 42} (:options c)))
                (jdbc/execute! c ["select * from fruit"]))))))
  (testing "against a wrapped Datasource"
    (is (seq (jdbc/on-connection+options
              [c (jdbc/with-options (ds) {:test-option 42})]
              ;; expect a wrapped connection carrying the options:
              (is (instance? java.sql.Connection (:connectable c)))
              (is (= {:test-option 42} (:options c)))
              (jdbc/execute! c ["select * from fruit"])))))
  (testing "against a Datasource"
    (is (seq (jdbc/on-connection+options
              [c (ds)] ; unwrapped datasource has no options
              ;; expect a wrapped connection with empty options:
              (is (instance? java.sql.Connection (:connectable c)))
              (is (= {} (:options c)))
              (jdbc/execute! c ["select * from fruit"]))))))