author     Ivan Lozano <ivanlozano@google.com>  2020-12-15 19:28:35 +0000
committer  Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>  2020-12-15 19:28:35 +0000
commit     31e798974258a4ef2ed38baf2285fc7e812fb070 (patch)
tree       824e84d8185505bb96916f8626e9b920321929d7
parent     44d9728040b8f23b5826279161d6de4be30ebf9e (diff)
parent     8ec3ca030fa6bf990399771f284350c74bd8dd6f (diff)
download   rusqlite-31e798974258a4ef2ed38baf2285fc7e812fb070.tar.gz
Merge "Upgrade rust/crates/rusqlite to 0.24.2" am: 34d2a4e441 am: 08908cfb6d am: 8ec3ca030f
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/rusqlite/+/1519204

MUST ONLY BE SUBMITTED BY AUTOMERGER

Change-Id: Ibcd10813c9e67dc4000a0c897445937e2b9f90ad
-rw-r--r--  .cargo_vcs_info.json                          2
-rw-r--r--  .github/workflows/main.yml                  142
-rw-r--r--  Android.bp                                    9
-rw-r--r--  Cargo.toml                                   33
-rw-r--r--  Cargo.toml.orig                              29
-rw-r--r--  METADATA                                      8
-rw-r--r--  README.md                                     4
-rw-r--r--  src/blob/mod.rs (renamed from src/blob.rs)  240
-rw-r--r--  src/blob/pos_io.rs                          281
-rw-r--r--  src/cache.rs                                  4
-rw-r--r--  src/error.rs                                 14
-rw-r--r--  src/row.rs                                    4
-rw-r--r--  src/types/mod.rs                             12
13 files changed, 603 insertions, 179 deletions
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index 8625996..deaa273 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,5 +1,5 @@
{
"git": {
- "sha1": "79ab6894f0f6f651515ab6eac6043685befe9bd8"
+ "sha1": "cef6dbbb26211baebedbebe6e114f5bcf9be2431"
}
}
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 5e93ceb..38c28c9 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -13,19 +13,19 @@ env:
RUST_BACKTRACE: 1
jobs:
test:
- name: Test
+ name: Test ${{ matrix.target }}
strategy:
fail-fast: false
matrix:
- platform:
+ include:
- { target: x86_64-pc-windows-msvc, os: windows-latest }
- { target: x86_64-unknown-linux-gnu, os: ubuntu-latest }
- { target: x86_64-apple-darwin, os: macos-latest }
- { target: x86_64-pc-windows-gnu, os: windows-latest, host: -x86_64-pc-windows-gnu }
- runs-on: ${{ matrix.platform.os }}
+ runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
@@ -33,48 +33,49 @@ jobs:
# we use actions-rs/toolchain.
- uses: hecrj/setup-rust-action@v1
with:
- rust-version: stable${{ matrix.platform.host }}
- targets: ${{ matrix.platform.target }}
+ rust-version: stable${{ matrix.host }}
+ targets: ${{ matrix.target }}
- - uses: actions-rs/cargo@v1
- with:
- command: build
- args: --features bundled --workspace --all-targets
-
- - uses: actions-rs/cargo@v1
- with:
- command: test
- args: --features bundled --workspace --all-targets
+ - run: cargo build --features bundled --workspace --all-targets --verbose
+ - run: cargo test --features bundled --workspace --all-targets --verbose
+ - run: cargo test --features bundled --workspace --doc --verbose
- - name: "cargo test --features 'bundled-full session buildtime_bindgen'"
+ - name: Test Features
# TODO: clang is installed on these -- but `bindgen` can't find it...
- if: matrix.platform.os != 'windows-latest'
- uses: actions-rs/cargo@v1
- with:
- command: test
- args: --features 'bundled-full session buildtime_bindgen' --all-targets --workspace
-
- - name: "cargo test --doc --features 'bundled-full session buildtime_bindgen'"
- # TODO: clang is installed on these -- but `bindgen` can't find it...
- if: matrix.platform.os != 'windows-latest'
- uses: actions-rs/cargo@v1
- with:
- command: test
- args: --features 'bundled-full session buildtime_bindgen' --doc --workspace
-
- - name: "cargo test --features bundled-full"
- uses: actions-rs/cargo@v1
- with:
- command: test
- args: --features bundled-full --all-targets --workspace
+ if: matrix.os != 'windows-latest'
+ run: |
+ cargo test --features 'bundled-full session buildtime_bindgen time' --all-targets --workspace --verbose
+ cargo test --features 'bundled-full session buildtime_bindgen time' --doc --workspace --verbose
- name: Static build
# Do we expect this to work / should we test with gnu toolchain?
- if: matrix.platform.os == 'x86_64-pc-windows-msvc'
- shell: cmd
- run: |
- set RUSTFLAGS=-Ctarget-feature=+crt-static
- cargo build --features bundled
+ if: matrix.os == 'x86_64-pc-windows-msvc'
+ env:
+ RUSTFLAGS: -Ctarget-feature=+crt-static
+ run: cargo build --features bundled
+
+ winsqlite3:
+ name: Test with winsqlite3
+ runs-on: windows-latest
+ steps:
+ - uses: actions/checkout@v2
+ - uses: hecrj/setup-rust-action@v1
+ # TODO: Should this test GNU toolchain? What about +crt-static?
+ # TODO: Is it worth testing other features?
+ - run: cargo build --features winsqlite3 --workspace --all-targets --verbose
+ - run: cargo test --features winsqlite3 --workspace --all-targets --verbose
+
+ sqlcipher:
+ name: Test with sqlcipher
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - uses: hecrj/setup-rust-action@v1
+ - run: sudo apt-get install sqlcipher libsqlcipher-dev
+ - run: sqlcipher --version
+ # TODO: Is it worth testing other features?
+ - run: cargo build --features sqlcipher --workspace --all-targets --verbose
+ - run: cargo test --features sqlcipher --workspace --all-targets --verbose
sanitizer:
name: Address Sanitizer
@@ -82,11 +83,9 @@ jobs:
steps:
- uses: actions/checkout@v2
# Need nightly rust.
- - uses: actions-rs/toolchain@v1
+ - uses: hecrj/setup-rust-action@v1
with:
- profile: minimal
- toolchain: nightly
- override: true
+ rust-version: nightly
components: rust-src
- name: Tests with asan
env:
@@ -98,32 +97,20 @@ jobs:
# leak sanitization, but we don't care about backtraces here, so long
# as the other tests have them.
RUST_BACKTRACE: '0'
- run: cargo -Z build-std test --features 'bundled-full session buildtime_bindgen with-asan' --target x86_64-unknown-linux-gnu
+ run: cargo -Z build-std test --features 'bundled-full session buildtime_bindgen time with-asan' --target x86_64-unknown-linux-gnu
# Ensure clippy doesn't complain.
clippy:
name: Clippy
- strategy:
- fail-fast: false
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- - uses: actions-rs/toolchain@v1
- with:
- profile: minimal
- toolchain: stable
- override: true
- - run: rustup component add clippy
- - uses: actions-rs/cargo@v1
- with:
- command: clippy
- # clippy with just bundled
- args: --all-targets --workspace --features bundled -- -D warnings
- - uses: actions-rs/cargo@v1
+ - uses: hecrj/setup-rust-action@v1
with:
- command: clippy
- # Clippy with all non-conflicting features
- args: --all-targets --workspace --features 'bundled-full session buildtime_bindgen' -- -D warnings
+ components: clippy
+ - run: cargo clippy --all-targets --workspace --features bundled -- -D warnings
+ # Clippy with all non-conflicting features
+ - run: cargo clippy --all-targets --workspace --features 'bundled-full session buildtime_bindgen time' -- -D warnings
# Ensure patch is formatted.
fmt:
@@ -131,17 +118,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- # This has a matcher for rustfmt errors, so we use it even though
- # elsewhere we use actions-rs/toolchain.
- uses: hecrj/setup-rust-action@v1
with:
- rust-version: stable
components: rustfmt
- - run: rustup component add rustfmt
- - uses: actions-rs/cargo@v1
- with:
- command: fmt
- args: --all -- --check
+ - run: cargo fmt --all -- --check
# Detect cases where documentation links don't resolve and such.
doc:
@@ -149,32 +129,24 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- - uses: actions-rs/toolchain@v1
- with:
- profile: minimal
- # Docs.rs uses nightly, which allows for easier syntax for linking to functions.
- toolchain: nightly
- override: true
- - uses: actions-rs/cargo@v1
+ - uses: hecrj/setup-rust-action@v1
with:
- # Need to use `cargo rustdoc` to actually get it to respect -D
- # warnings... Note: this also requires nightly.
- command: rustdoc
- args: --features 'bundled-full session buildtime_bindgen' -- -D warnings
+ rust-version: nightly
+ # Need to use `cargo rustdoc` to actually get it to respect -D
+ # warnings... Note: this also requires nightly.
+ - run: cargo rustdoc --features 'bundled-full session buildtime_bindgen time' -- -D warnings
codecov:
name: Generate code coverage
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- - uses: actions-rs/toolchain@v1
- with:
- toolchain: stable
- override: true
-
+ - uses: hecrj/setup-rust-action@v1
- name: Run cargo-tarpaulin
uses: actions-rs/tarpaulin@v0.1
with:
+ # Intentionally omit time feature until we're on time 0.3, at which
+ # point it should be added to `bundled-full`.
args: '--features "bundled-full session buildtime_bindgen"'
- name: Upload to codecov.io
diff --git a/Android.bp b/Android.bp
index f97371c..c7b491d 100644
--- a/Android.bp
+++ b/Android.bp
@@ -11,20 +11,21 @@ rust_library {
"libbitflags",
"libfallible_iterator",
"libfallible_streaming_iterator",
+ "libhashlink",
"liblibsqlite3_sys",
- "liblru_cache",
"libmemchr",
"libsmallvec",
],
}
// dependent_library ["feature_list"]
+// ahash-0.4.6
// bitflags-1.2.1 "default"
// fallible-iterator-0.2.0 "default,std"
// fallible-streaming-iterator-0.1.9
+// hashbrown-0.9.1 "ahash,default,inline-more"
+// hashlink-0.6.0
// libsqlite3-sys-0.20.1 "bundled_bindings,default,min_sqlite_version_3_6_8,pkg-config,vcpkg"
-// linked-hash-map-0.5.3
-// lru-cache-0.1.2
// memchr-2.3.4 "default,std,use_std"
// pkg-config-0.3.19
-// smallvec-1.4.2
+// smallvec-1.5.1
diff --git a/Cargo.toml b/Cargo.toml
index 6842cfa..8efebab 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -13,7 +13,7 @@
[package]
edition = "2018"
name = "rusqlite"
-version = "0.24.0"
+version = "0.24.2"
authors = ["The rusqlite developers"]
description = "Ergonomic wrapper for SQLite"
documentation = "http://docs.rs/rusqlite/"
@@ -25,7 +25,7 @@ repository = "https://github.com/rusqlite/rusqlite"
[package.metadata.docs.rs]
all-features = false
default-target = "x86_64-unknown-linux-gnu"
-features = ["backup", "blob", "chrono", "collation", "functions", "limits", "load_extension", "serde_json", "time", "trace", "url", "vtab", "window", "modern_sqlite", "column_decltype"]
+features = ["array", "backup", "blob", "chrono", "collation", "functions", "limits", "load_extension", "serde_json", "time", "trace", "url", "vtab", "window", "modern_sqlite", "column_decltype"]
no-default-features = true
[package.metadata.playground]
@@ -53,10 +53,10 @@ harness = false
name = "exec"
harness = false
[dependencies.bitflags]
-version = "1.0"
+version = "1.2"
[dependencies.byteorder]
-version = "1.2"
+version = "1.3"
features = ["i128"]
optional = true
@@ -65,7 +65,7 @@ version = "0.4"
optional = true
[dependencies.csv]
-version = "1.0"
+version = "1.1"
optional = true
[dependencies.fallible-iterator]
@@ -74,32 +74,32 @@ version = "0.2"
[dependencies.fallible-streaming-iterator]
version = "0.1"
+[dependencies.hashlink]
+version = "0.6"
+
[dependencies.lazy_static]
-version = "1.0"
+version = "1.4"
optional = true
[dependencies.libsqlite3-sys]
-version = "0.20.0"
-
-[dependencies.lru-cache]
-version = "0.1"
+version = "0.20.1"
[dependencies.memchr]
-version = "2.2.0"
+version = "2.3"
[dependencies.serde_json]
version = "1.0"
optional = true
[dependencies.smallvec]
-version = "1.3"
+version = "1.0"
[dependencies.time]
version = "0.2"
optional = true
[dependencies.url]
-version = "2.0"
+version = "2.1"
optional = true
[dependencies.uuid]
@@ -112,16 +112,16 @@ version = "0.1"
version = "0.3"
[dev-dependencies.lazy_static]
-version = "1.0"
+version = "1.4"
[dev-dependencies.regex]
-version = "1.0"
+version = "1.3"
[dev-dependencies.tempfile]
version = "3.1.0"
[dev-dependencies.unicase]
-version = "2.4.0"
+version = "2.6.0"
[dev-dependencies.uuid]
version = "0.8"
@@ -154,6 +154,7 @@ unlock_notify = ["libsqlite3-sys/unlock_notify"]
vtab = ["libsqlite3-sys/min_sqlite_version_3_7_7", "lazy_static"]
wasm32-wasi-vfs = ["libsqlite3-sys/wasm32-wasi-vfs"]
window = ["functions"]
+winsqlite3 = ["libsqlite3-sys/winsqlite3"]
with-asan = ["libsqlite3-sys/with-asan"]
[badges.appveyor]
repository = "rusqlite/rusqlite"
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index 6394b09..857bb86 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -1,6 +1,6 @@
[package]
name = "rusqlite"
-version = "0.24.0"
+version = "0.24.2"
authors = ["The rusqlite developers"]
edition = "2018"
description = "Ergonomic wrapper for SQLite"
@@ -61,6 +61,7 @@ bundled-windows = ["libsqlite3-sys/bundled-windows"]
with-asan = ["libsqlite3-sys/with-asan"]
column_decltype = []
wasm32-wasi-vfs = ["libsqlite3-sys/wasm32-wasi-vfs"]
+winsqlite3 = ["libsqlite3-sys/winsqlite3"]
# Helper feature for enabling both `bundled` and most non-build-related optional
# features or dependencies. This is useful for running tests / clippy / etc. New
@@ -96,34 +97,34 @@ bundled-full = [
[dependencies]
time = { version = "0.2", optional = true }
-bitflags = "1.0"
-lru-cache = "0.1"
+bitflags = "1.2"
+hashlink = "0.6"
chrono = { version = "0.4", optional = true }
serde_json = { version = "1.0", optional = true }
-csv = { version = "1.0", optional = true }
-url = { version = "2.0", optional = true }
-lazy_static = { version = "1.0", optional = true }
-byteorder = { version = "1.2", features = ["i128"], optional = true }
+csv = { version = "1.1", optional = true }
+url = { version = "2.1", optional = true }
+lazy_static = { version = "1.4", optional = true }
+byteorder = { version = "1.3", features = ["i128"], optional = true }
fallible-iterator = "0.2"
fallible-streaming-iterator = "0.1"
-memchr = "2.2.0"
+memchr = "2.3"
uuid = { version = "0.8", optional = true }
-smallvec = "1.3"
+smallvec = "1.0"
[dev-dependencies]
doc-comment = "0.3"
tempfile = "3.1.0"
-lazy_static = "1.0"
-regex = "1.0"
+lazy_static = "1.4"
+regex = "1.3"
uuid = { version = "0.8", features = ["v4"] }
-unicase = "2.4.0"
+unicase = "2.6.0"
# Use `bencher` over criterion because it builds much faster and we don't have
# many benchmarks
bencher = "0.1"
[dependencies.libsqlite3-sys]
path = "libsqlite3-sys"
-version = "0.20.0"
+version = "0.20.1"
[[test]]
name = "config_log"
@@ -144,7 +145,7 @@ name = "exec"
harness = false
[package.metadata.docs.rs]
-features = [ "backup", "blob", "chrono", "collation", "functions", "limits", "load_extension", "serde_json", "time", "trace", "url", "vtab", "window", "modern_sqlite", "column_decltype" ]
+features = [ "array", "backup", "blob", "chrono", "collation", "functions", "limits", "load_extension", "serde_json", "time", "trace", "url", "vtab", "window", "modern_sqlite", "column_decltype" ]
all-features = false
no-default-features = true
default-target = "x86_64-unknown-linux-gnu"
diff --git a/METADATA b/METADATA
index c50227e..c2f666c 100644
--- a/METADATA
+++ b/METADATA
@@ -7,13 +7,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/rusqlite/rusqlite-0.24.0.crate"
+ value: "https://static.crates.io/crates/rusqlite/rusqlite-0.24.2.crate"
}
- version: "0.24.0"
+ version: "0.24.2"
license_type: NOTICE
last_upgrade_date {
year: 2020
- month: 8
- day: 22
+ month: 12
+ day: 5
}
}
diff --git a/README.md b/README.md
index 300c2b3..09f0c20 100644
--- a/README.md
+++ b/README.md
@@ -119,11 +119,11 @@ You can adjust this behavior in a number of ways:
* If you use the `bundled` feature, `libsqlite3-sys` will use the
[cc](https://crates.io/crates/cc) crate to compile SQLite from source and
link against that. This source is embedded in the `libsqlite3-sys` crate and
- is currently SQLite 3.33.0 (as of `rusqlite` 0.24.0 / `libsqlite3-sys`
+ is currently SQLite 3.33.0 (as of `rusqlite` 0.24.1 / `libsqlite3-sys`
0.20.0). This is probably the simplest solution to any build problems. You can enable this by adding the following in your `Cargo.toml` file:
```toml
[dependencies.rusqlite]
- version = "0.24.0"
+ version = "0.24.2"
features = ["bundled"]
```
* You can set the `SQLITE3_LIB_DIR` to point to directory containing the SQLite
diff --git a/src/blob.rs b/src/blob/mod.rs
index f4ca951..7d7ec3d 100644
--- a/src/blob.rs
+++ b/src/blob/mod.rs
@@ -3,54 +3,188 @@
//! Note that SQLite does not provide API-level access to change the size of a
//! BLOB; that must be performed through SQL statements.
//!
+//! There are two choices for how to perform IO on a [`Blob`].
+//!
+//! 1. The implementations it provides of the `std::io::Read`, `std::io::Write`,
+//! and `std::io::Seek` traits.
+//!
+//! 2. A positional IO API, e.g. [`Blob::read_at`], [`Blob::write_at`] and
+//! similar.
+//!
+//! Documenting these in order:
+//!
+//! ## 1. `std::io` trait implementations.
+//!
//! `Blob` conforms to `std::io::Read`, `std::io::Write`, and `std::io::Seek`,
//! so it plays nicely with other types that build on these (such as
-//! `std::io::BufReader` and `std::io::BufWriter`). However, you must be
-//! careful with the size of the blob. For example, when using a `BufWriter`,
-//! the `BufWriter` will accept more data than the `Blob`
-//! will allow, so make sure to call `flush` and check for errors. (See the
-//! unit tests in this module for an example.)
+//! `std::io::BufReader` and `std::io::BufWriter`). However, you must be careful
+//! with the size of the blob. For example, when using a `BufWriter`, the
+//! `BufWriter` will accept more data than the `Blob` will allow, so make sure
+//! to call `flush` and check for errors. (See the unit tests in this module for
+//! an example.)
+//!
+//! ## 2. Positional IO
+//!
+//! `Blob`s also offer a `pread` / `pwrite`-style positional IO api in the form
+//! of [`Blob::read_at`], [`Blob::write_at`], [`Blob::raw_read_at`],
+//! [`Blob::read_at_exact`], and [`Blob::raw_read_at_exact`].
+//!
+//! These APIs all take the position to read from or write to as a
+//! parameter, instead of using an internal `pos` value.
+//!
+//! ### Positional IO Read Variants
+//!
+//! For the `read` functions, there are several functions provided:
+//!
+//! - [`Blob::read_at`]
+//! - [`Blob::raw_read_at`]
+//! - [`Blob::read_at_exact`]
+//! - [`Blob::raw_read_at_exact`]
+//!
+//! These can be divided along two axes: raw/not raw, and exact/inexact:
+//!
+//! 1. Raw/not raw refers to the type of the destination buffer. The raw
+//! functions take a `&mut [MaybeUninit<u8>]` as the destination buffer,
+//! where the "normal" functions take a `&mut [u8]`.
+//!
+//! Using `MaybeUninit` here can be more efficient in some cases, but is
+//! often inconvenient, so both are provided.
+//!
+//! 2. Exact/inexact refers to whether or not the entire buffer must be
+//! filled in order for the call to be considered a success.
+//!
+//! The "exact" functions require the provided buffer be entirely filled, or
+//! they return an error, whereas the "inexact" functions read as much out of
+//! the blob as is available, and return how much they were able to read.
+//!
+//! The inexact functions are preferable if you do not know the size of the
+//! blob already, and the exact functions are preferable if you do.
+//!
+//! ### Comparison to using the `std::io` traits:
+//!
+//! In general, the positional methods offer the following Pro/Cons compared to
+//! using the implementations of `std::io::{Read, Write, Seek}` we provide for
+//! `Blob`:
+//!
+//! 1. (Pro) There is no need to first seek to a position in order to perform IO
+//! on it as the position is a parameter.
+//!
+//! 2. (Pro) `Blob`'s positional read functions don't mutate the blob in any
+//! way, and take `&self`. No `&mut` access required.
+//!
+//! 3. (Pro) Positional IO functions return `Err(rusqlite::Error)` on failure,
+//! rather than `Err(std::io::Error)`. Returning `rusqlite::Error` is more
+//! accurate and convenient.
+//!
+//! Note that for the `std::io` API, no data is lost however, and it can be
+//! recovered with `io_err.downcast::<rusqlite::Error>()` (this can be easy
+//! to forget, though).
//!
-//! ## Example
+//! 4. (Pro, for now). A `raw` version of the read API exists which can allow
+//! reading into a `&mut [MaybeUninit<u8>]` buffer, which avoids a potential
+//! costly initialization step. (However, `std::io` traits will certainly
+//! gain this someday, which is why this is only a "Pro, for now").
+//!
+//! 5. (Con) The set of functions is more bare-bones than what is offered in
+//! `std::io`, which has a number of adapters, handy algorithms, and further
+//! traits.
+//!
+//! 6. (Con) No meaningful interoperability with other crates, so if you need
+//! that you must use `std::io`.
+//!
+//! To generalize: the `std::io` traits are useful because they conform to a
+//! standard interface that a lot of code knows how to handle; however, that
+//! interface is not a perfect fit for [`Blob`], so another small set of
+//! functions is provided as well.
+//!
+//! # Example (`std::io`)
+//!
+//! ```rust
+//! # use rusqlite::blob::ZeroBlob;
+//! # use rusqlite::{Connection, DatabaseName, NO_PARAMS};
+//! # use std::error::Error;
+//! # use std::io::{Read, Seek, SeekFrom, Write};
+//! # fn main() -> Result<(), Box<dyn Error>> {
+//! let db = Connection::open_in_memory()?;
+//! db.execute_batch("CREATE TABLE test_table (content BLOB);")?;
+//!
+//! // Insert a BLOB into the `content` column of `test_table`. Note that the Blob
+//! // I/O API provides no way of inserting or resizing BLOBs in the DB -- this
+//! // must be done via SQL.
+//! db.execute(
+//! "INSERT INTO test_table (content) VALUES (ZEROBLOB(10))",
+//! NO_PARAMS,
+//! )?;
+//!
+//! // Get the row id off the BLOB we just inserted.
+//! let rowid = db.last_insert_rowid();
+//! // Open the BLOB we just inserted for IO.
+//! let mut blob = db.blob_open(DatabaseName::Main, "test_table", "content", rowid, false)?;
+//!
+//! // Write some data into the blob. Make sure to test that the number of bytes
+//! // written matches what you expect; if you try to write too much, the data
+//! // will be truncated to the size of the BLOB.
+//! let bytes_written = blob.write(b"01234567")?;
+//! assert_eq!(bytes_written, 8);
+//!
+//! // Move back to the start and read into a local buffer.
+//! // Same guidance - make sure you check the number of bytes read!
+//! blob.seek(SeekFrom::Start(0))?;
+//! let mut buf = [0u8; 20];
+//! let bytes_read = blob.read(&mut buf[..])?;
+//! assert_eq!(bytes_read, 10); // note we read 10 bytes because the blob has size 10
+//!
+//! // Insert another BLOB, this time using a parameter passed in from
+//! // rust (potentially with a dynamic size).
+//! db.execute("INSERT INTO test_table (content) VALUES (?)", &[ZeroBlob(64)])?;
+//!
+//! // given a new row ID, we can reopen the blob on that row
+//! let rowid = db.last_insert_rowid();
+//! blob.reopen(rowid)?;
+//! // Just check that the size is right.
+//! assert_eq!(blob.len(), 64);
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! # Example (Positional)
//!
//! ```rust
-//! use rusqlite::blob::ZeroBlob;
-//! use rusqlite::{Connection, DatabaseName, NO_PARAMS};
-//! use std::error::Error;
-//! use std::io::{Read, Seek, SeekFrom, Write};
-//!
-//! fn main() -> Result<(), Box<Error>> {
-//! let db = Connection::open_in_memory()?;
-//! db.execute_batch("CREATE TABLE test (content BLOB);")?;
-//! db.execute(
-//! "INSERT INTO test (content) VALUES (ZEROBLOB(10))",
-//! NO_PARAMS,
-//! )?;
-//!
-//! let rowid = db.last_insert_rowid();
-//! let mut blob = db.blob_open(DatabaseName::Main, "test", "content", rowid, false)?;
-//!
-//! // Make sure to test that the number of bytes written matches what you expect;
-//! // if you try to write too much, the data will be truncated to the size of the
-//! // BLOB.
-//! let bytes_written = blob.write(b"01234567")?;
-//! assert_eq!(bytes_written, 8);
-//!
-//! // Same guidance - make sure you check the number of bytes read!
-//! blob.seek(SeekFrom::Start(0))?;
-//! let mut buf = [0u8; 20];
-//! let bytes_read = blob.read(&mut buf[..])?;
-//! assert_eq!(bytes_read, 10); // note we read 10 bytes because the blob has size 10
-//!
-//! db.execute("INSERT INTO test (content) VALUES (?)", &[ZeroBlob(64)])?;
-//!
-//! // given a new row ID, we can reopen the blob on that row
-//! let rowid = db.last_insert_rowid();
-//! blob.reopen(rowid)?;
-//!
-//! assert_eq!(blob.size(), 64);
-//! Ok(())
-//! }
+//! # use rusqlite::blob::ZeroBlob;
+//! # use rusqlite::{Connection, DatabaseName, NO_PARAMS};
+//! # use std::error::Error;
+//! # fn main() -> Result<(), Box<dyn Error>> {
+//! let db = Connection::open_in_memory()?;
+//! db.execute_batch("CREATE TABLE test_table (content BLOB);")?;
+//! // Insert a blob into the `content` column of `test_table`. Note that the Blob
+//! // I/O API provides no way of inserting or resizing blobs in the DB -- this
+//! // must be done via SQL.
+//! db.execute(
+//! "INSERT INTO test_table (content) VALUES (ZEROBLOB(10))",
+//! NO_PARAMS,
+//! )?;
+//! // Get the row id off the blob we just inserted.
+//! let rowid = db.last_insert_rowid();
+//! // Open the blob we just inserted for IO.
+//! let mut blob = db.blob_open(DatabaseName::Main, "test_table", "content", rowid, false)?;
+//! // Write some data into the blob.
+//! blob.write_at(b"ABCDEF", 2)?;
+//!
+//! // Read the whole blob into a local buffer.
+//! let mut buf = [0u8; 10];
+//! blob.read_at_exact(&mut buf, 0)?;
+//! assert_eq!(&buf, b"\0\0ABCDEF\0\0");
+//!
+//! // Insert another blob, this time using a parameter passed in from
+//! // rust (potentially with a dynamic size).
+//! db.execute("INSERT INTO test_table (content) VALUES (?)", &[ZeroBlob(64)])?;
+//!
+//! // given a new row ID, we can reopen the blob on that row
+//! let rowid = db.last_insert_rowid();
+//! blob.reopen(rowid)?;
+//! assert_eq!(blob.len(), 64);
+//! # Ok(())
+//! # }
//! ```
use std::cmp::min;
use std::io;
@@ -60,10 +194,14 @@ use super::ffi;
use super::types::{ToSql, ToSqlOutput};
use crate::{Connection, DatabaseName, Result};
-/// `feature = "blob"` Handle to an open BLOB.
+mod pos_io;
+
+/// `feature = "blob"` Handle to an open BLOB. See [`rusqlite::blob`](crate::blob) documentation for
+/// in-depth discussion.
pub struct Blob<'conn> {
conn: &'conn Connection,
blob: *mut ffi::sqlite3_blob,
+ // used by std::io implementations,
pos: i32,
}
@@ -128,6 +266,17 @@ impl Blob<'_> {
unsafe { ffi::sqlite3_blob_bytes(self.blob) }
}
+ /// Return the current size in bytes of the BLOB.
+ pub fn len(&self) -> usize {
+ use std::convert::TryInto;
+ self.size().try_into().unwrap()
+ }
+
+ /// Return true if the BLOB is empty.
+ pub fn is_empty(&self) -> bool {
+ self.size() == 0
+ }
+
/// Close a BLOB handle.
///
/// Calling `close` explicitly is not required (the BLOB will be closed
@@ -161,7 +310,8 @@ impl io::Read for Blob<'_> {
if n <= 0 {
return Ok(0);
}
- let rc = unsafe { ffi::sqlite3_blob_read(self.blob, buf.as_ptr() as *mut _, n, self.pos) };
+ let rc =
+ unsafe { ffi::sqlite3_blob_read(self.blob, buf.as_mut_ptr() as *mut _, n, self.pos) };
self.conn
.decode_result(rc)
.map(|_| {
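
A point worth highlighting from the new module docs above: the positional read methods take `&self` plus an explicit offset, so no seek and no mutable borrow is needed between reads. A minimal sketch (illustrative only, not part of this patch), reusing the `test_table` setup from the examples above:

```rust
use rusqlite::{Connection, DatabaseName, NO_PARAMS};

fn positional_reads() -> rusqlite::Result<()> {
    let db = Connection::open_in_memory()?;
    db.execute_batch("CREATE TABLE test_table (content BLOB);")?;
    db.execute(
        "INSERT INTO test_table (content) VALUES (ZEROBLOB(10))",
        NO_PARAMS,
    )?;
    let rowid = db.last_insert_rowid();
    // Open read-only; positional reads only ever need `&self`.
    let blob = db.blob_open(DatabaseName::Main, "test_table", "content", rowid, true)?;

    let mut head = [0u8; 4];
    let mut tail = [0u8; 4];
    // Offsets are passed explicitly, so there is no seeking between reads
    // and the internal `std::io` position is never touched.
    blob.read_at_exact(&mut head, 0)?;
    blob.read_at_exact(&mut tail, 6)?;
    Ok(())
}
```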
diff --git a/src/blob/pos_io.rs b/src/blob/pos_io.rs
new file mode 100644
index 0000000..9f1f994
--- /dev/null
+++ b/src/blob/pos_io.rs
@@ -0,0 +1,281 @@
+use super::Blob;
+
+use std::convert::TryFrom;
+use std::mem::MaybeUninit;
+use std::slice::from_raw_parts_mut;
+
+use crate::ffi;
+use crate::{Error, Result};
+
+impl<'conn> Blob<'conn> {
+ /// Write `buf` to `self` starting at `write_start`, returning an error if
+ /// `write_start + buf.len()` is past the end of the blob.
+ ///
+ /// If an error is returned, no data is written.
+ ///
+ /// Note: the blob cannot be resized using this function -- that must be
+ /// done using SQL (for example, an `UPDATE` statement).
+ ///
+ /// Note: This is part of the positional I/O API, and thus takes an absolute
+ /// position to write to, instead of using the internal position that can be
+ /// manipulated by the `std::io` traits.
+ ///
+ /// Unlike the similarly named [`FileExt::write_at`][fext_write_at] function
+ /// (from `std::os::unix`), it's always an error to perform a "short write".
+ ///
+ /// [fext_write_at]: https://doc.rust-lang.org/std/os/unix/fs/trait.FileExt.html#tymethod.write_at
+ #[inline]
+ pub fn write_at(&mut self, buf: &[u8], write_start: usize) -> Result<()> {
+ let len = self.len();
+
+ if buf.len().saturating_add(write_start) > len {
+ return Err(Error::BlobSizeError);
+ }
+ // We know `len` fits in an `i32`, so either:
+ //
+ // 1. `buf.len() + write_start` overflows, in which case we'd hit the
+ // return above (courtesy of `saturating_add`).
+ //
+ // 2. `buf.len() + write_start` doesn't overflow but is larger than len,
+ // in which case ditto.
+ //
+ // 3. `buf.len() + write_start` doesn't overflow but is less than len.
+ // This means that both `buf.len()` and `write_start` can also be
+ // losslessly converted to i32, since `len` came from an i32.
+ // Sanity check the above.
+ debug_assert!(i32::try_from(write_start).is_ok() && i32::try_from(buf.len()).is_ok());
+ unsafe {
+ check!(ffi::sqlite3_blob_write(
+ self.blob,
+ buf.as_ptr() as *const _,
+ buf.len() as i32,
+ write_start as i32,
+ ));
+ }
+ Ok(())
+ }
+
+ /// An alias for `write_at` provided for compatibility with the conceptually
+ /// equivalent [`std::os::unix::FileExt::write_all_at`][write_all_at]
+ /// function from libstd:
+ ///
+ /// [write_all_at]: https://doc.rust-lang.org/std/os/unix/fs/trait.FileExt.html#method.write_all_at
+ #[inline]
+ pub fn write_all_at(&mut self, buf: &[u8], write_start: usize) -> Result<()> {
+ self.write_at(buf, write_start)
+ }
+
+ /// Read as much as possible from `offset` to `offset + buf.len()` out of
+ /// `self`, writing into `buf`. On success, returns the number of bytes
+ /// written.
+ ///
+ /// If there's insufficient data in `self`, then the returned value will be
+ /// less than `buf.len()`.
+ ///
+ /// See also [`Blob::raw_read_at`], which can take an uninitialized buffer,
+ /// or [`Blob::read_at_exact`] which returns an error if the entire `buf` is
+ /// not read.
+ ///
+ /// Note: This is part of the positional I/O API, and thus takes an absolute
+ /// position to read from, instead of using the internal position that can
+ /// be manipulated by the `std::io` traits. Consequently, it does not change
+ /// that value either.
+ #[inline]
+ pub fn read_at(&self, buf: &mut [u8], read_start: usize) -> Result<usize> {
+ // Safety: this is safe because `raw_read_at` never stores uninitialized
+ // data into `as_uninit`.
+ let as_uninit: &mut [MaybeUninit<u8>] =
+ unsafe { from_raw_parts_mut(buf.as_mut_ptr() as *mut _, buf.len()) };
+ self.raw_read_at(as_uninit, read_start).map(|s| s.len())
+ }
+
+ /// Read as much as possible from `offset` to `offset + buf.len()` out of
+ /// `self`, writing into `buf`. On success, returns the portion of `buf`
+ /// which was initialized by this call.
+ ///
+ /// If there's insufficient data in `self`, then the returned value will be
+ /// shorter than `buf`.
+ ///
+ /// See also [`Blob::read_at`], which takes a `&mut [u8]` buffer instead of
+ /// a slice of `MaybeUninit<u8>`.
+ ///
+ /// Note: This is part of the positional I/O API, and thus takes an absolute
+ /// position to read from, instead of using the internal position that can
+ /// be manipulated by the `std::io` traits. Consequently, it does not change
+ /// that value either.
+ #[inline]
+ pub fn raw_read_at<'a>(
+ &self,
+ buf: &'a mut [MaybeUninit<u8>],
+ read_start: usize,
+ ) -> Result<&'a mut [u8]> {
+ let len = self.len();
+
+ let read_len = match len.checked_sub(read_start) {
+ None | Some(0) => 0,
+ Some(v) => v.min(buf.len()),
+ };
+
+ if read_len == 0 {
+ // We could return `Ok(&mut [])`, but it seems confusing that the
+ // pointers don't match, so fabricate a empty slice of u8 with the
+ // same base pointer as `buf`.
+ let empty = unsafe { from_raw_parts_mut(buf.as_mut_ptr() as *mut u8, 0) };
+ return Ok(empty);
+ }
+
+ // At this point we believe `read_start as i32` is lossless because:
+ //
+ // 1. `len as i32` is known to be lossless, since it comes from a SQLite
+ // api returning an i32.
+ //
+ // 2. If we got here, `len.checked_sub(read_start)` was Some (or else
+ // we'd have hit the `if read_len == 0` early return), so `len` must
+ // be larger than `read_start`, and so it must fit in i32 as well.
+ debug_assert!(i32::try_from(read_start).is_ok());
+
+ // We also believe that `read_start + read_len <= len` because:
+ //
+ // 1. This is equivalent to `read_len <= len - read_start` via algebra.
+ // 2. We know that `read_len` is `min(len - read_start, buf.len())`
+ // 3. Expanding, this is `min(len - read_start, buf.len()) <= len - read_start`,
+ // or `min(A, B) <= A` which is clearly true.
+ //
+ // Note that this stuff is in debug_assert so no need to use checked_add
+ // and such -- we'll always panic on overflow in debug builds.
+ debug_assert!(read_start + read_len <= len);
+
+ // These follow naturally.
+ debug_assert!(buf.len() >= read_len);
+ debug_assert!(i32::try_from(buf.len()).is_ok());
+ debug_assert!(i32::try_from(read_len).is_ok());
+
+ unsafe {
+ check!(ffi::sqlite3_blob_read(
+ self.blob,
+ buf.as_mut_ptr() as *mut _,
+ read_len as i32,
+ read_start as i32,
+ ));
+
+ Ok(from_raw_parts_mut(buf.as_mut_ptr() as *mut u8, read_len))
+ }
+ }
+
+ /// Equivalent to [`Blob::read_at`], but returns a `BlobSizeError` if `buf`
+ /// is not fully initialized.
+ #[inline]
+ pub fn read_at_exact(&self, buf: &mut [u8], read_start: usize) -> Result<()> {
+ let n = self.read_at(buf, read_start)?;
+ if n != buf.len() {
+ Err(Error::BlobSizeError)
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Equivalent to [`Blob::raw_read_at`], but returns a `BlobSizeError` if
+ /// `buf` is not fully initialized.
+ #[inline]
+ pub fn raw_read_at_exact<'a>(
+ &self,
+ buf: &'a mut [MaybeUninit<u8>],
+ read_start: usize,
+ ) -> Result<&'a mut [u8]> {
+ let buflen = buf.len();
+ let initted = self.raw_read_at(buf, read_start)?;
+ if initted.len() != buflen {
+ Err(Error::BlobSizeError)
+ } else {
+ Ok(initted)
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use crate::{Connection, DatabaseName, NO_PARAMS};
+ // to ensure we don't modify seek pos
+ use std::io::Seek as _;
+
+ #[test]
+ fn test_pos_io() {
+ let db = Connection::open_in_memory().unwrap();
+ db.execute_batch("CREATE TABLE test_table(content BLOB);")
+ .unwrap();
+ db.execute(
+ "INSERT INTO test_table(content) VALUES (ZEROBLOB(10))",
+ NO_PARAMS,
+ )
+ .unwrap();
+
+ let rowid = db.last_insert_rowid();
+ let mut blob = db
+ .blob_open(DatabaseName::Main, "test_table", "content", rowid, false)
+ .unwrap();
+ // modify the seek pos to ensure we aren't using it or modifying it.
+ blob.seek(std::io::SeekFrom::Start(1)).unwrap();
+
+ let one2ten: [u8; 10] = [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ blob.write_at(&one2ten, 0).unwrap();
+
+ let mut s = [0u8; 10];
+ blob.read_at_exact(&mut s, 0).unwrap();
+ assert_eq!(&s, &one2ten, "write should go through");
+ assert!(blob.read_at_exact(&mut s, 1).is_err());
+
+ blob.read_at_exact(&mut s, 0).unwrap();
+ assert_eq!(&s, &one2ten, "should be unchanged");
+
+ let mut fives = [0u8; 5];
+ blob.read_at_exact(&mut fives, 0).unwrap();
+ assert_eq!(&fives, &[1u8, 2, 3, 4, 5]);
+
+ blob.read_at_exact(&mut fives, 5).unwrap();
+ assert_eq!(&fives, &[6u8, 7, 8, 9, 10]);
+ assert!(blob.read_at_exact(&mut fives, 7).is_err());
+ assert!(blob.read_at_exact(&mut fives, 12).is_err());
+ assert!(blob.read_at_exact(&mut fives, 10).is_err());
+ assert!(blob.read_at_exact(&mut fives, i32::MAX as usize).is_err());
+ assert!(blob
+ .read_at_exact(&mut fives, i32::MAX as usize + 1)
+ .is_err());
+
+ // zero-length reads are fine if in bounds
+ blob.read_at_exact(&mut [], 10).unwrap();
+ blob.read_at_exact(&mut [], 0).unwrap();
+ blob.read_at_exact(&mut [], 5).unwrap();
+
+ blob.write_all_at(&[16, 17, 18, 19, 20], 5).unwrap();
+ blob.read_at_exact(&mut s, 0).unwrap();
+ assert_eq!(&s, &[1u8, 2, 3, 4, 5, 16, 17, 18, 19, 20]);
+
+ assert!(blob.write_at(&[100, 99, 98, 97, 96], 6).is_err());
+ assert!(blob
+ .write_at(&[100, 99, 98, 97, 96], i32::MAX as usize)
+ .is_err());
+ assert!(blob
+ .write_at(&[100, 99, 98, 97, 96], i32::MAX as usize + 1)
+ .is_err());
+
+ blob.read_at_exact(&mut s, 0).unwrap();
+ assert_eq!(&s, &[1u8, 2, 3, 4, 5, 16, 17, 18, 19, 20]);
+
+ let mut s2: [std::mem::MaybeUninit<u8>; 10] = [std::mem::MaybeUninit::uninit(); 10];
+ {
+ let read = blob.raw_read_at_exact(&mut s2, 0).unwrap();
+ assert_eq!(read, &s);
+ assert!(std::ptr::eq(read.as_ptr(), s2.as_ptr().cast()));
+ }
+
+ let mut empty = [];
+ assert!(std::ptr::eq(
+ blob.raw_read_at_exact(&mut empty, 0).unwrap().as_ptr(),
+ empty.as_ptr().cast(),
+ ));
+ assert!(blob.raw_read_at_exact(&mut s2, 5).is_err());
+
+ let end_pos = blob.seek(std::io::SeekFrom::Current(0)).unwrap();
+ assert_eq!(end_pos, 1);
+ }
+}
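
For completeness, a small sketch (illustrative only, not part of this patch) of the `raw` read variant added above, which fills a `MaybeUninit` buffer and hands back only the initialized prefix:

```rust
use std::mem::MaybeUninit;

use rusqlite::blob::Blob;

fn sum_bytes(blob: &Blob<'_>) -> rusqlite::Result<u32> {
    // No need to zero the buffer up front; that is the point of the raw API.
    let mut buf: [MaybeUninit<u8>; 64] = [MaybeUninit::uninit(); 64];
    // `raw_read_at` reads as much as the blob holds from offset 0 and returns
    // the initialized prefix, which may be shorter than `buf`.
    let filled: &mut [u8] = blob.raw_read_at(&mut buf, 0)?;
    Ok(filled.iter().map(|&b| u32::from(b)).sum())
}
```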
diff --git a/src/cache.rs b/src/cache.rs
index 67ecd24..7dc9d23 100644
--- a/src/cache.rs
+++ b/src/cache.rs
@@ -2,7 +2,7 @@
use crate::raw_statement::RawStatement;
use crate::{Connection, Result, Statement};
-use lru_cache::LruCache;
+use hashlink::LruCache;
use std::cell::RefCell;
use std::ops::{Deref, DerefMut};
use std::sync::Arc;
@@ -54,7 +54,7 @@ impl Connection {
}
/// Prepared statements LRU cache.
-#[derive(Debug)]
+// #[derive(Debug)] // FIXME: https://github.com/kyren/hashlink/pull/4
pub struct StatementCache(RefCell<LruCache<Arc<str>, RawStatement>>);
/// Cacheable statement.
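
The cache switched to `hashlink` here is the one behind `Connection::prepare_cached`. A hypothetical usage sketch (not part of this patch; the `numbers` table is invented for illustration):

```rust
use rusqlite::Connection;

fn insert_all(db: &Connection, values: &[i64]) -> rusqlite::Result<()> {
    for &v in values {
        // The second and later calls hit the LRU statement cache instead of
        // re-preparing the same SQL.
        let mut stmt = db.prepare_cached("INSERT INTO numbers (n) VALUES (?)")?;
        stmt.execute(&[v])?;
    }
    Ok(())
}
```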
diff --git a/src/error.rs b/src/error.rs
index c05f8cc..98583cb 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -109,6 +109,12 @@ pub enum Error {
/// parameters in the query. The first `usize` is how many parameters were
/// given, the 2nd is how many were expected.
InvalidParameterCount(usize, usize),
+
+ /// Returned from various functions in the Blob IO positional API. For
+ /// example, [`Blob::raw_read_at_exact`](crate::blob::Blob::raw_read_at_exact)
+ /// will return it if the blob has insufficient data.
+ #[cfg(feature = "blob")]
+ BlobSizeError,
}
impl PartialEq for Error {
@@ -151,6 +157,8 @@ impl PartialEq for Error {
(Error::InvalidParameterCount(i1, n1), Error::InvalidParameterCount(i2, n2)) => {
i1 == i2 && n1 == n2
}
+ #[cfg(feature = "blob")]
+ (Error::BlobSizeError, Error::BlobSizeError) => true,
(..) => false,
}
}
@@ -262,6 +270,9 @@ impl fmt::Display for Error {
#[cfg(feature = "functions")]
Error::GetAuxWrongType => write!(f, "get_aux called with wrong type"),
Error::MultipleStatement => write!(f, "Multiple statements provided"),
+
+ #[cfg(feature = "blob")]
+ Error::BlobSizeError => "Blob size is insufficient".fmt(f),
}
}
}
@@ -306,6 +317,9 @@ impl error::Error for Error {
#[cfg(feature = "functions")]
Error::GetAuxWrongType => None,
+
+ #[cfg(feature = "blob")]
+ Error::BlobSizeError => None,
}
}
}
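
A brief sketch (illustrative only, not part of this patch) of matching on the new variant, e.g. to treat a too-short blob as a soft failure:

```rust
use rusqlite::blob::Blob;
use rusqlite::Error;

fn read_header(blob: &Blob<'_>) -> rusqlite::Result<Option<[u8; 16]>> {
    let mut header = [0u8; 16];
    match blob.read_at_exact(&mut header, 0) {
        Ok(()) => Ok(Some(header)),
        // The blob holds fewer than 16 bytes.
        Err(Error::BlobSizeError) => Ok(None),
        Err(e) => Err(e),
    }
}
```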
diff --git a/src/row.rs b/src/row.rs
index 3e536d9..36aa1a6 100644
--- a/src/row.rs
+++ b/src/row.rs
@@ -6,6 +6,7 @@ use super::{Error, Result, Statement};
use crate::types::{FromSql, FromSqlError, ValueRef};
/// A handle for the resulting rows of a query.
+#[must_use = "Rows is lazy and will do nothing unless consumed"]
pub struct Rows<'stmt> {
pub(crate) stmt: Option<&'stmt Statement<'stmt>>,
row: Option<Row<'stmt>>,
@@ -96,6 +97,7 @@ impl Drop for Rows<'_> {
}
/// `F` is used to transform the _streaming_ iterator into a _fallible_ iterator.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct Map<'stmt, F> {
rows: Rows<'stmt>,
f: F,
@@ -119,6 +121,7 @@ where
/// An iterator over the mapped resulting rows of a query.
///
/// `F` is used to transform the _streaming_ iterator into a _standard_ iterator.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct MappedRows<'stmt, F> {
rows: Rows<'stmt>,
map: F,
@@ -150,6 +153,7 @@ where
/// An iterator over the mapped resulting rows of a query, with an Error type
/// unifying with Error.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct AndThenRows<'stmt, F> {
rows: Rows<'stmt>,
map: F,
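
The `#[must_use]` additions exist because these row types are lazy: nothing touches the database until they are iterated. A minimal sketch (illustrative only, not part of this patch):

```rust
use rusqlite::{Connection, NO_PARAMS};

fn table_names(db: &Connection) -> rusqlite::Result<Vec<String>> {
    let mut stmt = db.prepare("SELECT name FROM sqlite_master WHERE type = 'table'")?;
    // `query_map` returns a lazy `MappedRows`; dropping it unconsumed is what
    // the new attribute warns about. Collecting drives the actual query.
    let rows = stmt.query_map(NO_PARAMS, |row| row.get(0))?;
    rows.collect()
}
```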
diff --git a/src/types/mod.rs b/src/types/mod.rs
index 2d163cf..85d8ef2 100644
--- a/src/types/mod.rs
+++ b/src/types/mod.rs
@@ -17,7 +17,9 @@
//! [datetime](https://www.sqlite.org/lang_datefunc.html) functions. If you
//! want different storage for datetimes, you can use a newtype.
//!
-#![cfg_attr(feature = "time", doc = r##"
+#![cfg_attr(
+ feature = "time",
+ doc = r##"
For example, to store datetimes as `i64`s counting the number of seconds since
the Unix epoch:
@@ -42,7 +44,8 @@ impl ToSql for DateTimeSql {
}
```
-"##)]
+"##
+)]
//! `ToSql` and `FromSql` are also implemented for `Option<T>` where `T`
//! implements `ToSql` or `FromSql` for the cases where you want to know if a
//! value was NULL (which gets translated to `None`).
@@ -229,10 +232,7 @@ mod test {
#[allow(clippy::cognitive_complexity)]
fn test_mismatched_types() {
fn is_invalid_column_type(err: Error) -> bool {
- match err {
- Error::InvalidColumnType(..) => true,
- _ => false,
- }
+ matches!(err, Error::InvalidColumnType(..))
}
let db = checked_memory_handle();