author     Yiming Jing <yimingjing@google.com>  2021-07-20 02:16:47 +0000
committer  Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>  2021-07-20 02:16:47 +0000
commit     7db07507c8027fe987ed7b89f9697f7a5b1c12c7 (patch)
tree       26534300b3c67f649ec85621b9c6672788a6e68e
parent     8351684c186432ad1fc48422b03592f9a84468bf (diff)
parent     280d209107a1b924796b300458c0fff71c46d906 (diff)
download   num-bigint-7db07507c8027fe987ed7b89f9697f7a5b1c12c7.tar.gz
Initial import of num-bigint-0.4.0 am: cf21fc4146 am: 58beb70181 am: 7307763d4e am: 280d209107
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/num-bigint/+/1770565
Change-Id: I48da2b013744e3a6e060f9fc5868e518f08e2efa
-rw-r--r--  .cargo_vcs_info.json  5
-rw-r--r--  .gitignore  4
-rw-r--r--  Cargo.toml  80
l---------  LICENSE  1
-rw-r--r--  LICENSE-APACHE  201
-rw-r--r--  LICENSE-MIT  25
-rw-r--r--  METADATA  20
-rw-r--r--  MODULE_LICENSE_APACHE2  0
-rw-r--r--  OWNERS  4
-rw-r--r--  README.md  81
-rw-r--r--  RELEASES.md  264
-rw-r--r--  benches/bigint.rs  420
-rw-r--r--  benches/factorial.rs  42
-rw-r--r--  benches/gcd.rs  76
-rw-r--r--  benches/rng/mod.rs  38
-rw-r--r--  benches/roots.rs  166
-rw-r--r--  benches/shootout-pidigits.rs  138
-rw-r--r--  build.rs  88
-rw-r--r--  src/bigint.rs  1147
-rw-r--r--  src/bigint/addition.rs  239
-rw-r--r--  src/bigint/arbitrary.rs  39
-rw-r--r--  src/bigint/bits.rs  531
-rw-r--r--  src/bigint/convert.rs  469
-rw-r--r--  src/bigint/division.rs  448
-rw-r--r--  src/bigint/multiplication.rs  192
-rw-r--r--  src/bigint/power.rs  94
-rw-r--r--  src/bigint/serde.rs  58
-rw-r--r--  src/bigint/shift.rs  107
-rw-r--r--  src/bigint/subtraction.rs  300
-rw-r--r--  src/bigrand.rs  283
-rw-r--r--  src/biguint.rs  1102
-rw-r--r--  src/biguint/addition.rs  254
-rw-r--r--  src/biguint/arbitrary.rs  34
-rw-r--r--  src/biguint/bits.rs  93
-rw-r--r--  src/biguint/convert.rs  756
-rw-r--r--  src/biguint/division.rs  615
-rw-r--r--  src/biguint/iter.rs  271
-rw-r--r--  src/biguint/monty.rs  225
-rw-r--r--  src/biguint/multiplication.rs  507
-rw-r--r--  src/biguint/power.rs  257
-rw-r--r--  src/biguint/serde.rs  108
-rw-r--r--  src/biguint/shift.rs  172
-rw-r--r--  src/biguint/subtraction.rs  312
-rw-r--r--  src/lib.rs  294
-rw-r--r--  src/macros.rs  441
-rw-r--r--  tests/bigint.rs  1402
-rw-r--r--  tests/bigint_bitwise.rs  178
-rw-r--r--  tests/bigint_scalar.rs  148
-rw-r--r--  tests/biguint.rs  1836
-rw-r--r--  tests/biguint_scalar.rs  113
-rw-r--r--  tests/consts/mod.rs  51
-rw-r--r--  tests/macros/mod.rs  78
-rw-r--r--  tests/modpow.rs  181
-rw-r--r--  tests/roots.rs  160
54 files changed, 15148 insertions, 0 deletions
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
new file mode 100644
index 0000000..04ce902
--- /dev/null
+++ b/.cargo_vcs_info.json
@@ -0,0 +1,5 @@
+{
+ "git": {
+ "sha1": "d1e4498cbc02d22b3ddded7216c38d94e494cf90"
+ }
+}
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..6809567
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+Cargo.lock
+target
+*.bk
+*.orig
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..9c2fdea
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,80 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+edition = "2018"
+name = "num-bigint"
+version = "0.4.0"
+authors = ["The Rust Project Developers"]
+build = "build.rs"
+exclude = ["/bors.toml", "/ci/*", "/.github/*"]
+description = "Big integer implementation for Rust"
+homepage = "https://github.com/rust-num/num-bigint"
+documentation = "https://docs.rs/num-bigint"
+readme = "README.md"
+keywords = ["mathematics", "numerics", "bignum"]
+categories = ["algorithms", "data-structures", "science"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-num/num-bigint"
+[package.metadata.docs.rs]
+features = ["std", "serde", "rand", "quickcheck", "arbitrary"]
+
+[[bench]]
+name = "bigint"
+
+[[bench]]
+name = "factorial"
+
+[[bench]]
+name = "gcd"
+
+[[bench]]
+name = "roots"
+
+[[bench]]
+name = "shootout-pidigits"
+harness = false
+[dependencies.arbitrary]
+version = "1"
+optional = true
+default-features = false
+
+[dependencies.num-integer]
+version = "0.1.42"
+features = ["i128"]
+default-features = false
+
+[dependencies.num-traits]
+version = "0.2.11"
+features = ["i128"]
+default-features = false
+
+[dependencies.quickcheck]
+version = "1"
+optional = true
+default-features = false
+
+[dependencies.rand]
+version = "0.8"
+optional = true
+default-features = false
+
+[dependencies.serde]
+version = "1.0"
+optional = true
+default-features = false
+[build-dependencies.autocfg]
+version = "1"
+
+[features]
+default = ["std"]
+std = ["num-integer/std", "num-traits/std"]
diff --git a/LICENSE b/LICENSE
new file mode 120000
index 0000000..6b579aa
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1 @@
+LICENSE-APACHE \ No newline at end of file
diff --git a/LICENSE-APACHE b/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/LICENSE-MIT b/LICENSE-MIT
new file mode 100644
index 0000000..39d4bdb
--- /dev/null
+++ b/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2014 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..4020657
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,20 @@
+name: "num-bigint"
+description: "Big integer implementation for Rust"
+third_party {
+ url {
+ type: HOMEPAGE
+ value: "https://crates.io/crates/num-bigint"
+ }
+ url {
+ type: ARCHIVE
+ value: "https://static.crates.io/crates/num-bigint/num-bigint-0.4.0.crate"
+ }
+ version: "0.4.0"
+ # Dual-licensed, using the least restrictive per go/thirdpartylicenses#same.
+ license_type: NOTICE
+ last_upgrade_date {
+ year: 2021
+ month: 7
+ day: 9
+ }
+}
diff --git a/MODULE_LICENSE_APACHE2 b/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MODULE_LICENSE_APACHE2
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000..3f9a6ad
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1,4 @@
+include platform/prebuilts/rust:master:/OWNERS
+# Android Auto
+skeys@google.com
+yimingjing@google.com
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..d1cedad
--- /dev/null
+++ b/README.md
@@ -0,0 +1,81 @@
+# num-bigint
+
+[![crate](https://img.shields.io/crates/v/num-bigint.svg)](https://crates.io/crates/num-bigint)
+[![documentation](https://docs.rs/num-bigint/badge.svg)](https://docs.rs/num-bigint)
+[![minimum rustc 1.31](https://img.shields.io/badge/rustc-1.31+-red.svg)](https://rust-lang.github.io/rfcs/2495-min-rust-version.html)
+[![build status](https://github.com/rust-num/num-bigint/workflows/master/badge.svg)](https://github.com/rust-num/num-bigint/actions)
+
+Big integer types for Rust, `BigInt` and `BigUint`.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+num-bigint = "0.4"
+```
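+
+With that in place, `BigUint` and `BigInt` work with the usual operators. A
+minimal sketch of basic use, assuming only the arithmetic operators, iterator
+`Product`, and `Display` impls provided by the crate:
+
+```rust
+use num_bigint::BigUint;
+
+fn main() {
+    // `Product` is implemented for iterators of primitives, so a factorial
+    // is a one-liner.
+    let f: BigUint = (1u32..=30).product();
+
+    // Operators also work by reference, which avoids cloning large values.
+    let square = &f * &f;
+
+    println!("30! = {}", f);
+    println!("(30!)^2 has {} bits", square.bits());
+}
+```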
+
+## Features
+
+The `std` crate feature is enabled by default, and is mandatory before Rust
+1.36 and the stabilized `alloc` crate. If you depend on `num-bigint` with
+`default-features = false`, you must manually enable the `std` feature yourself
+if your compiler is not new enough.
+
+### Random Generation
+
+`num-bigint` supports the generation of random big integers when the `rand`
+feature is enabled. To enable it, include `rand` as a dependency:
+
+```toml
+rand = "0.8"
+num-bigint = { version = "0.4", features = ["rand"] }
+```
+
+Note that you must use the version of `rand` that `num-bigint` is compatible
+with: `0.8`.
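+
+With the feature enabled, the `RandBigInt` extension trait adds generator
+methods to any `rand` RNG. A minimal sketch (assuming `rand`'s default
+features, so that `thread_rng` is available):
+
+```rust
+use num_bigint::RandBigInt;
+
+fn main() {
+    let mut rng = rand::thread_rng();
+
+    // A uniformly random unsigned integer of (up to) 256 bits.
+    let a = rng.gen_biguint(256);
+
+    // A signed integer of the same bit size, with a random sign.
+    let b = rng.gen_bigint(256);
+
+    println!("a = {}", a);
+    println!("b = {}", b);
+}
+```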
+
+## Releases
+
+Release notes are available in [RELEASES.md](RELEASES.md).
+
+## Compatibility
+
+The `num-bigint` crate is tested for rustc 1.31 and greater.
+
+## Alternatives
+
+While `num-bigint` strives for good performance in pure Rust code, other
+crates may offer better performance with different trade-offs. The following
+table offers a brief comparison to a few alternatives.
+
+| Crate | License | Min rustc | Implementation |
+| :--------------- | :------------- | :-------- | :------------- |
+| **`num-bigint`** | MIT/Apache-2.0 | 1.31 | pure rust |
+| [`ramp`] | Apache-2.0 | nightly | rust and inline assembly |
+| [`rug`] | LGPL-3.0+ | 1.37 | bundles [GMP] via [`gmp-mpfr-sys`] |
+| [`rust-gmp`] | MIT | stable? | links to [GMP] |
+| [`apint`] | MIT/Apache-2.0 | 1.26 | pure rust (unfinished) |
+
+[GMP]: https://gmplib.org/
+[`gmp-mpfr-sys`]: https://crates.io/crates/gmp-mpfr-sys
+[`rug`]: https://crates.io/crates/rug
+[`rust-gmp`]: https://crates.io/crates/rust-gmp
+[`ramp`]: https://crates.io/crates/ramp
+[`apint`]: https://crates.io/crates/apint
+
+## License
+
+Licensed under either of
+
+ * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
+ * [MIT license](http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
diff --git a/RELEASES.md b/RELEASES.md
new file mode 100644
index 0000000..fd07b1e
--- /dev/null
+++ b/RELEASES.md
@@ -0,0 +1,264 @@
+# Release 0.4.0 (2021-03-05)
+
+### Breaking Changes
+
+- Updated public dependencies on [arbitrary, quickcheck][194], and [rand][185]:
+ - `arbitrary` support has been updated to 1.0, requiring Rust 1.40.
+ - `quickcheck` support has been updated to 1.0, requiring Rust 1.46.
+ - `rand` support has been updated to 0.8, requiring Rust 1.36.
+- [`Debug` now shows plain numeric values for `BigInt` and `BigUint`][195],
+ rather than the raw list of internal digits.
+
+**Contributors**: @cuviper, @Gelbpunkt
+
+[185]: https://github.com/rust-num/num-bigint/pull/185
+[194]: https://github.com/rust-num/num-bigint/pull/194
+[195]: https://github.com/rust-num/num-bigint/pull/195
+
+# Release 0.3.2 (2021-03-04)
+
+- [The new `BigUint` methods `count_ones` and `trailing_ones`][175] return the
+ number of `1` bits in the entire value or just its least-significant tail,
+ respectively.
+- [The new `BigInt` and `BigUint` methods `bit` and `set_bit`][183] will read
+ and write individual bits of the value. For negative `BigInt`, bits are
+ determined as if they were in the two's complement representation.
+- [The `from_radix_le` and `from_radix_be` methods][187] now accept empty
+ buffers to represent zero.
+- [`BigInt` and `BigUint` can now iterate digits as `u32` or `u64`][192],
+ regardless of the actual internal digit size.
+
+**Contributors**: @BartMassey, @cuviper, @janmarthedal, @sebastianv89, @Speedy37
+
+[175]: https://github.com/rust-num/num-bigint/pull/175
+[183]: https://github.com/rust-num/num-bigint/pull/183
+[187]: https://github.com/rust-num/num-bigint/pull/187
+[192]: https://github.com/rust-num/num-bigint/pull/192
+
+# Release 0.3.1 (2020-11-03)
+
+- [Addition and subtraction now uses intrinsics][141] for performance on `x86`
+ and `x86_64` when built with Rust 1.33 or later.
+- [Conversions `to_f32` and `to_f64` now return infinity][163] for very large
+ numbers, rather than `None`. This does preserve the sign too, so a large
+ negative `BigInt` will convert to negative infinity.
+- [The optional `arbitrary` feature implements `arbitrary::Arbitrary`][166],
+ distinct from `quickcheck::Arbitrary`.
+- [The division algorithm has been optimized][170] to reduce the number of
+ temporary allocations and improve the internal guesses at each step.
+- [`BigInt` and `BigUint` will opportunistically shrink capacity][171] if the
+ internal vector is much larger than needed.
+
+**Contributors**: @cuviper, @e00E, @ejmahler, @notoria, @tczajka
+
+[141]: https://github.com/rust-num/num-bigint/pull/141
+[163]: https://github.com/rust-num/num-bigint/pull/163
+[166]: https://github.com/rust-num/num-bigint/pull/166
+[170]: https://github.com/rust-num/num-bigint/pull/170
+[171]: https://github.com/rust-num/num-bigint/pull/171
+
+# Release 0.3.0 (2020-06-12)
+
+### Enhancements
+
+- [The internal `BigDigit` may now be either `u32` or `u64`][62], although that
+ implementation detail is not exposed in the API. For now, this is chosen to
+ match the target pointer size, but may change in the future.
+- [No-`std` is now supported with the `alloc` crate on Rust 1.36][101].
+- [`Pow` is now implemented for bigint values][137], not just references.
+- [`TryFrom` is now implemented on Rust 1.34 and later][123], converting signed
+ integers to unsigned, and narrowing big integers to primitives.
+- [`Shl` and `Shr` are now implemented for a variety of shift types][142].
+- A new `trailing_zeros()` returns the number of consecutive zeros from the
+ least significant bit.
+- The new `BigInt::magnitude` and `into_parts` methods give access to its
+ `BigUint` part as the magnitude.
+
+### Breaking Changes
+
+- `num-bigint` now requires Rust 1.31 or greater.
+ - The "i128" opt-in feature was removed, now always available.
+- [Updated public dependencies][110]:
+ - `rand` support has been updated to 0.7, requiring Rust 1.32.
+ - `quickcheck` support has been updated to 0.9, requiring Rust 1.34.
+- [Removed `impl Neg for BigUint`][145], which only ever panicked.
+- [Bit counts are now `u64` instead of `usize`][143].
+
+**Contributors**: @cuviper, @dignifiedquire, @hansihe,
+@kpcyrd, @milesand, @tech6hutch
+
+[62]: https://github.com/rust-num/num-bigint/pull/62
+[101]: https://github.com/rust-num/num-bigint/pull/101
+[110]: https://github.com/rust-num/num-bigint/pull/110
+[123]: https://github.com/rust-num/num-bigint/pull/123
+[137]: https://github.com/rust-num/num-bigint/pull/137
+[142]: https://github.com/rust-num/num-bigint/pull/142
+[143]: https://github.com/rust-num/num-bigint/pull/143
+[145]: https://github.com/rust-num/num-bigint/pull/145
+
+# Release 0.2.6 (2020-01-27)
+
+- [Fix the promotion of negative `isize` in `BigInt` assign-ops][133].
+
+**Contributors**: @cuviper, @HactarCE
+
+[133]: https://github.com/rust-num/num-bigint/pull/133
+
+# Release 0.2.5 (2020-01-09)
+
+- [Updated the `autocfg` build dependency to 1.0][126].
+
+**Contributors**: @cuviper, @tspiteri
+
+[126]: https://github.com/rust-num/num-bigint/pull/126
+
+# Release 0.2.4 (2020-01-01)
+
+- [The new `BigUint::to_u32_digits` method][104] returns the number as a
+ little-endian vector of base-2<sup>32</sup> digits. The same method on
+ `BigInt` also returns the sign.
+- [`BigUint::modpow` now applies a modulus even for exponent 1][113], which
+ also affects `BigInt::modpow`.
+- [`BigInt::modpow` now returns the correct sign for negative bases with even
+ exponents][114].
+
+[104]: https://github.com/rust-num/num-bigint/pull/104
+[113]: https://github.com/rust-num/num-bigint/pull/113
+[114]: https://github.com/rust-num/num-bigint/pull/114
+
+**Contributors**: @alex-ozdemir, @cuviper, @dingelish, @Speedy37, @youknowone
+
+# Release 0.2.3 (2019-09-03)
+
+- [`Pow` is now implemented for `BigUint` exponents][77].
+- [The optional `quickcheck` feature enables implementations of `Arbitrary`][99].
+- See the [full comparison][compare-0.2.3] for performance enhancements and more!
+
+[77]: https://github.com/rust-num/num-bigint/pull/77
+[99]: https://github.com/rust-num/num-bigint/pull/99
+[compare-0.2.3]: https://github.com/rust-num/num-bigint/compare/num-bigint-0.2.2...num-bigint-0.2.3
+
+**Contributors**: @cuviper, @lcnr, @maxbla, @mikelodder7, @mikong,
+@TheLetterTheta, @tspiteri, @XAMPPRocky, @youknowone
+
+# Release 0.2.2 (2018-12-14)
+
+- [The `Roots` implementations now use better initial guesses][71].
+- [Fixed `to_signed_bytes_*` for some positive numbers][72], where the
+ most-significant byte is `0x80` and the rest are `0`.
+
+[71]: https://github.com/rust-num/num-bigint/pull/71
+[72]: https://github.com/rust-num/num-bigint/pull/72
+
+**Contributors**: @cuviper, @leodasvacas
+
+# Release 0.2.1 (2018-11-02)
+
+- [`RandBigInt` now uses `Rng::fill_bytes`][53] to improve performance, instead
+ of repeated `gen::<u32>` calls. This also affects the implementations of the
+ other `rand` traits. This may potentially change the values produced by some
+ seeded RNGs on previous versions, but the values were tested to be stable
+ with `ChaChaRng`, `IsaacRng`, and `XorShiftRng`.
+- [`BigInt` and `BigUint` now implement `num_integer::Roots`][56].
+- [`BigInt` and `BigUint` now implement `num_traits::Pow`][54].
+- [`BigInt` and `BigUint` now implement operators with 128-bit integers][64].
+
+**Contributors**: @cuviper, @dignifiedquire, @mancabizjak, @Robbepop,
+@TheIronBorn, @thomwiggers
+
+[53]: https://github.com/rust-num/num-bigint/pull/53
+[54]: https://github.com/rust-num/num-bigint/pull/54
+[56]: https://github.com/rust-num/num-bigint/pull/56
+[64]: https://github.com/rust-num/num-bigint/pull/64
+
+# Release 0.2.0 (2018-05-25)
+
+### Enhancements
+
+- [`BigInt` and `BigUint` now implement `Product` and `Sum`][22] for iterators
+ of any item that we can `Mul` and `Add`, respectively. For example, a
+ factorial can now be simply: `let f: BigUint = (1u32..1000).product();`
+- [`BigInt` now supports two's-complement logic operations][26], namely
+ `BitAnd`, `BitOr`, `BitXor`, and `Not`. These act conceptually as if each
+ number had an infinite prefix of `0` or `1` bits for positive or negative.
+- [`BigInt` now supports assignment operators][41] like `AddAssign`.
+- [`BigInt` and `BigUint` now support conversions with `i128` and `u128`][44],
+ if sufficient compiler support is detected.
+- [`BigInt` and `BigUint` now implement rand's `SampleUniform` trait][48], and
+ [a custom `RandomBits` distribution samples by bit size][49].
+- The release also includes other miscellaneous improvements to performance.
+
+### Breaking Changes
+
+- [`num-bigint` now requires rustc 1.15 or greater][23].
+- [The crate now has a `std` feature, and won't build without it][46]. This is
+ in preparation for someday supporting `#![no_std]` with `alloc`.
+- [The `serde` dependency has been updated to 1.0][24], still disabled by
+ default. The `rustc-serialize` crate is no longer supported by `num-bigint`.
+- [The `rand` dependency has been updated to 0.5][48], now disabled by default.
+ This requires rustc 1.22 or greater for `rand`'s own requirement.
+- [`Shr for BigInt` now rounds down][8] rather than toward zero, matching the
+ behavior of the primitive integers for negative values.
+- [`ParseBigIntError` is now an opaque type][37].
+- [The `big_digit` module is no longer public][38], nor are the `BigDigit` and
+ `DoubleBigDigit` types and `ZERO_BIG_DIGIT` constant that were re-exported in
+ the crate root. Public APIs which deal in digits, like `BigUint::from_slice`,
+ will now always be base-`u32`.
+
+**Contributors**: @clarcharr, @cuviper, @dodomorandi, @tiehuis, @tspiteri
+
+[8]: https://github.com/rust-num/num-bigint/pull/8
+[22]: https://github.com/rust-num/num-bigint/pull/22
+[23]: https://github.com/rust-num/num-bigint/pull/23
+[24]: https://github.com/rust-num/num-bigint/pull/24
+[26]: https://github.com/rust-num/num-bigint/pull/26
+[37]: https://github.com/rust-num/num-bigint/pull/37
+[38]: https://github.com/rust-num/num-bigint/pull/38
+[41]: https://github.com/rust-num/num-bigint/pull/41
+[44]: https://github.com/rust-num/num-bigint/pull/44
+[46]: https://github.com/rust-num/num-bigint/pull/46
+[48]: https://github.com/rust-num/num-bigint/pull/48
+[49]: https://github.com/rust-num/num-bigint/pull/49
+
+# Release 0.1.44 (2018-05-14)
+
+- [Division with single-digit divisors is now much faster.][42]
+- The README now compares [`ramp`, `rug`, `rust-gmp`][20], and [`apint`][21].
+
+**Contributors**: @cuviper, @Robbepop
+
+[20]: https://github.com/rust-num/num-bigint/pull/20
+[21]: https://github.com/rust-num/num-bigint/pull/21
+[42]: https://github.com/rust-num/num-bigint/pull/42
+
+# Release 0.1.43 (2018-02-08)
+
+- [The new `BigInt::modpow`][18] performs signed modular exponentiation, using
+ the existing `BigUint::modpow` and rounding negatives similar to `mod_floor`.
+
+**Contributors**: @cuviper
+
+[18]: https://github.com/rust-num/num-bigint/pull/18
+
+
+# Release 0.1.42 (2018-02-07)
+
+- [num-bigint now has its own source repository][num-356] at [rust-num/num-bigint][home].
+- [`lcm` now avoids creating a large intermediate product][num-350].
+- [`gcd` now uses Stein's algorithm][15] with faster shifts instead of division.
+- [`rand` support is now extended to 0.4][11] (while still allowing 0.3).
+
+**Contributors**: @cuviper, @Emerentius, @ignatenkobrain, @mhogrefe
+
+[home]: https://github.com/rust-num/num-bigint
+[num-350]: https://github.com/rust-num/num/pull/350
+[num-356]: https://github.com/rust-num/num/pull/356
+[11]: https://github.com/rust-num/num-bigint/pull/11
+[15]: https://github.com/rust-num/num-bigint/pull/15
+
+
+# Prior releases
+
+No prior release notes were kept. Thanks all the same to the many
+contributors that have made this crate what it is!
+
diff --git a/benches/bigint.rs b/benches/bigint.rs
new file mode 100644
index 0000000..b7f5fd2
--- /dev/null
+++ b/benches/bigint.rs
@@ -0,0 +1,420 @@
+#![feature(test)]
+#![cfg(feature = "rand")]
+
+extern crate test;
+
+use num_bigint::{BigInt, BigUint, RandBigInt};
+use num_traits::{FromPrimitive, Num, One, Zero};
+use std::mem::replace;
+use test::Bencher;
+
+mod rng;
+use rng::get_rng;
+
+fn multiply_bench(b: &mut Bencher, xbits: u64, ybits: u64) {
+ let mut rng = get_rng();
+ let x = rng.gen_bigint(xbits);
+ let y = rng.gen_bigint(ybits);
+
+ b.iter(|| &x * &y);
+}
+
+fn divide_bench(b: &mut Bencher, xbits: u64, ybits: u64) {
+ let mut rng = get_rng();
+ let x = rng.gen_bigint(xbits);
+ let y = rng.gen_bigint(ybits);
+
+ b.iter(|| &x / &y);
+}
+
+fn remainder_bench(b: &mut Bencher, xbits: u64, ybits: u64) {
+ let mut rng = get_rng();
+ let x = rng.gen_bigint(xbits);
+ let y = rng.gen_bigint(ybits);
+
+ b.iter(|| &x % &y);
+}
+
+fn factorial(n: usize) -> BigUint {
+ let mut f: BigUint = One::one();
+ for i in 1..=n {
+ let bu: BigUint = FromPrimitive::from_usize(i).unwrap();
+ f += bu;
+ }
+ f
+}
+
+/// Compute Fibonacci numbers
+fn fib(n: usize) -> BigUint {
+ let mut f0: BigUint = Zero::zero();
+ let mut f1: BigUint = One::one();
+ for _ in 0..n {
+ let f2 = f0 + &f1;
+ f0 = replace(&mut f1, f2);
+ }
+ f0
+}
+
+/// Compute Fibonacci numbers with two ops per iteration
+/// (add and subtract, like issue #200)
+fn fib2(n: usize) -> BigUint {
+ let mut f0: BigUint = Zero::zero();
+ let mut f1: BigUint = One::one();
+ for _ in 0..n {
+ f1 += &f0;
+ f0 = &f1 - f0;
+ }
+ f0
+}
+
+#[bench]
+fn multiply_0(b: &mut Bencher) {
+ multiply_bench(b, 1 << 8, 1 << 8);
+}
+
+#[bench]
+fn multiply_1(b: &mut Bencher) {
+ multiply_bench(b, 1 << 8, 1 << 16);
+}
+
+#[bench]
+fn multiply_2(b: &mut Bencher) {
+ multiply_bench(b, 1 << 16, 1 << 16);
+}
+
+#[bench]
+fn multiply_3(b: &mut Bencher) {
+ multiply_bench(b, 1 << 16, 1 << 17);
+}
+
+#[bench]
+fn divide_0(b: &mut Bencher) {
+ divide_bench(b, 1 << 8, 1 << 6);
+}
+
+#[bench]
+fn divide_1(b: &mut Bencher) {
+ divide_bench(b, 1 << 12, 1 << 8);
+}
+
+#[bench]
+fn divide_2(b: &mut Bencher) {
+ divide_bench(b, 1 << 16, 1 << 12);
+}
+
+#[bench]
+fn divide_big_little(b: &mut Bencher) {
+ divide_bench(b, 1 << 16, 1 << 4);
+}
+
+#[bench]
+fn remainder_0(b: &mut Bencher) {
+ remainder_bench(b, 1 << 8, 1 << 6);
+}
+
+#[bench]
+fn remainder_1(b: &mut Bencher) {
+ remainder_bench(b, 1 << 12, 1 << 8);
+}
+
+#[bench]
+fn remainder_2(b: &mut Bencher) {
+ remainder_bench(b, 1 << 16, 1 << 12);
+}
+
+#[bench]
+fn remainder_big_little(b: &mut Bencher) {
+ remainder_bench(b, 1 << 16, 1 << 4);
+}
+
+#[bench]
+fn factorial_100(b: &mut Bencher) {
+ b.iter(|| factorial(100));
+}
+
+#[bench]
+fn fib_100(b: &mut Bencher) {
+ b.iter(|| fib(100));
+}
+
+#[bench]
+fn fib_1000(b: &mut Bencher) {
+ b.iter(|| fib(1000));
+}
+
+#[bench]
+fn fib_10000(b: &mut Bencher) {
+ b.iter(|| fib(10000));
+}
+
+#[bench]
+fn fib2_100(b: &mut Bencher) {
+ b.iter(|| fib2(100));
+}
+
+#[bench]
+fn fib2_1000(b: &mut Bencher) {
+ b.iter(|| fib2(1000));
+}
+
+#[bench]
+fn fib2_10000(b: &mut Bencher) {
+ b.iter(|| fib2(10000));
+}
+
+#[bench]
+fn fac_to_string(b: &mut Bencher) {
+ let fac = factorial(100);
+ b.iter(|| fac.to_string());
+}
+
+#[bench]
+fn fib_to_string(b: &mut Bencher) {
+ let fib = fib(100);
+ b.iter(|| fib.to_string());
+}
+
+fn to_str_radix_bench(b: &mut Bencher, radix: u32) {
+ let mut rng = get_rng();
+ let x = rng.gen_bigint(1009);
+ b.iter(|| x.to_str_radix(radix));
+}
+
+#[bench]
+fn to_str_radix_02(b: &mut Bencher) {
+ to_str_radix_bench(b, 2);
+}
+
+#[bench]
+fn to_str_radix_08(b: &mut Bencher) {
+ to_str_radix_bench(b, 8);
+}
+
+#[bench]
+fn to_str_radix_10(b: &mut Bencher) {
+ to_str_radix_bench(b, 10);
+}
+
+#[bench]
+fn to_str_radix_16(b: &mut Bencher) {
+ to_str_radix_bench(b, 16);
+}
+
+#[bench]
+fn to_str_radix_36(b: &mut Bencher) {
+ to_str_radix_bench(b, 36);
+}
+
+fn from_str_radix_bench(b: &mut Bencher, radix: u32) {
+ let mut rng = get_rng();
+ let x = rng.gen_bigint(1009);
+ let s = x.to_str_radix(radix);
+ assert_eq!(x, BigInt::from_str_radix(&s, radix).unwrap());
+ b.iter(|| BigInt::from_str_radix(&s, radix));
+}
+
+#[bench]
+fn from_str_radix_02(b: &mut Bencher) {
+ from_str_radix_bench(b, 2);
+}
+
+#[bench]
+fn from_str_radix_08(b: &mut Bencher) {
+ from_str_radix_bench(b, 8);
+}
+
+#[bench]
+fn from_str_radix_10(b: &mut Bencher) {
+ from_str_radix_bench(b, 10);
+}
+
+#[bench]
+fn from_str_radix_16(b: &mut Bencher) {
+ from_str_radix_bench(b, 16);
+}
+
+#[bench]
+fn from_str_radix_36(b: &mut Bencher) {
+ from_str_radix_bench(b, 36);
+}
+
+fn rand_bench(b: &mut Bencher, bits: u64) {
+ let mut rng = get_rng();
+
+ b.iter(|| rng.gen_bigint(bits));
+}
+
+#[bench]
+fn rand_64(b: &mut Bencher) {
+ rand_bench(b, 1 << 6);
+}
+
+#[bench]
+fn rand_256(b: &mut Bencher) {
+ rand_bench(b, 1 << 8);
+}
+
+#[bench]
+fn rand_1009(b: &mut Bencher) {
+ rand_bench(b, 1009);
+}
+
+#[bench]
+fn rand_2048(b: &mut Bencher) {
+ rand_bench(b, 1 << 11);
+}
+
+#[bench]
+fn rand_4096(b: &mut Bencher) {
+ rand_bench(b, 1 << 12);
+}
+
+#[bench]
+fn rand_8192(b: &mut Bencher) {
+ rand_bench(b, 1 << 13);
+}
+
+#[bench]
+fn rand_65536(b: &mut Bencher) {
+ rand_bench(b, 1 << 16);
+}
+
+#[bench]
+fn rand_131072(b: &mut Bencher) {
+ rand_bench(b, 1 << 17);
+}
+
+#[bench]
+fn shl(b: &mut Bencher) {
+ let n = BigUint::one() << 1000u32;
+ let mut m = n.clone();
+ b.iter(|| {
+ m.clone_from(&n);
+ for i in 0..50 {
+ m <<= i;
+ }
+ })
+}
+
+#[bench]
+fn shr(b: &mut Bencher) {
+ let n = BigUint::one() << 2000u32;
+ let mut m = n.clone();
+ b.iter(|| {
+ m.clone_from(&n);
+ for i in 0..50 {
+ m >>= i;
+ }
+ })
+}
+
+#[bench]
+fn hash(b: &mut Bencher) {
+ use std::collections::HashSet;
+ let mut rng = get_rng();
+ let v: Vec<BigInt> = (1000..2000).map(|bits| rng.gen_bigint(bits)).collect();
+ b.iter(|| {
+ let h: HashSet<&BigInt> = v.iter().collect();
+ assert_eq!(h.len(), v.len());
+ });
+}
+
+#[bench]
+fn pow_bench(b: &mut Bencher) {
+ b.iter(|| {
+ let upper = 100_u32;
+ let mut i_big = BigUint::from(1u32);
+ for _i in 2..=upper {
+ i_big += 1u32;
+ for j in 2..=upper {
+ i_big.pow(j);
+ }
+ }
+ });
+}
+
+#[bench]
+fn pow_bench_bigexp(b: &mut Bencher) {
+ use num_traits::Pow;
+
+ b.iter(|| {
+ let upper = 100_u32;
+ let mut i_big = BigUint::from(1u32);
+ for _i in 2..=upper {
+ i_big += 1u32;
+ let mut j_big = BigUint::from(1u32);
+ for _j in 2..=upper {
+ j_big += 1u32;
+ Pow::pow(&i_big, &j_big);
+ }
+ }
+ });
+}
+
+/// This modulus is the prime from the 2048-bit MODP DH group:
+/// https://tools.ietf.org/html/rfc3526#section-3
+const RFC3526_2048BIT_MODP_GROUP: &str = "\
+ FFFFFFFF_FFFFFFFF_C90FDAA2_2168C234_C4C6628B_80DC1CD1\
+ 29024E08_8A67CC74_020BBEA6_3B139B22_514A0879_8E3404DD\
+ EF9519B3_CD3A431B_302B0A6D_F25F1437_4FE1356D_6D51C245\
+ E485B576_625E7EC6_F44C42E9_A637ED6B_0BFF5CB6_F406B7ED\
+ EE386BFB_5A899FA5_AE9F2411_7C4B1FE6_49286651_ECE45B3D\
+ C2007CB8_A163BF05_98DA4836_1C55D39A_69163FA8_FD24CF5F\
+ 83655D23_DCA3AD96_1C62F356_208552BB_9ED52907_7096966D\
+ 670C354E_4ABC9804_F1746C08_CA18217C_32905E46_2E36CE3B\
+ E39E772C_180E8603_9B2783A2_EC07A28F_B5C55DF0_6F4C52C9\
+ DE2BCBF6_95581718_3995497C_EA956AE5_15D22618_98FA0510\
+ 15728E5A_8AACAA68_FFFFFFFF_FFFFFFFF";
+
+#[bench]
+fn modpow(b: &mut Bencher) {
+ let mut rng = get_rng();
+ let base = rng.gen_biguint(2048);
+ let e = rng.gen_biguint(2048);
+ let m = BigUint::from_str_radix(RFC3526_2048BIT_MODP_GROUP, 16).unwrap();
+
+ b.iter(|| base.modpow(&e, &m));
+}
+
+#[bench]
+fn modpow_even(b: &mut Bencher) {
+ let mut rng = get_rng();
+ let base = rng.gen_biguint(2048);
+ let e = rng.gen_biguint(2048);
+ // Make the modulus even, so monty (base-2^32) doesn't apply.
+ let m = BigUint::from_str_radix(RFC3526_2048BIT_MODP_GROUP, 16).unwrap() - 1u32;
+
+ b.iter(|| base.modpow(&e, &m));
+}
+
+#[bench]
+fn to_u32_digits(b: &mut Bencher) {
+ let mut rng = get_rng();
+ let n = rng.gen_biguint(2048);
+
+ b.iter(|| n.to_u32_digits());
+}
+
+#[bench]
+fn iter_u32_digits(b: &mut Bencher) {
+ let mut rng = get_rng();
+ let n = rng.gen_biguint(2048);
+
+ b.iter(|| n.iter_u32_digits().max());
+}
+
+#[bench]
+fn to_u64_digits(b: &mut Bencher) {
+ let mut rng = get_rng();
+ let n = rng.gen_biguint(2048);
+
+ b.iter(|| n.to_u64_digits());
+}
+
+#[bench]
+fn iter_u64_digits(b: &mut Bencher) {
+ let mut rng = get_rng();
+ let n = rng.gen_biguint(2048);
+
+ b.iter(|| n.iter_u64_digits().max());
+}
diff --git a/benches/factorial.rs b/benches/factorial.rs
new file mode 100644
index 0000000..a1e7b3c
--- /dev/null
+++ b/benches/factorial.rs
@@ -0,0 +1,42 @@
+#![feature(test)]
+
+extern crate test;
+
+use num_bigint::BigUint;
+use num_traits::One;
+use std::ops::{Div, Mul};
+use test::Bencher;
+
+#[bench]
+fn factorial_mul_biguint(b: &mut Bencher) {
+ b.iter(|| {
+ (1u32..1000)
+ .map(BigUint::from)
+ .fold(BigUint::one(), Mul::mul)
+ });
+}
+
+#[bench]
+fn factorial_mul_u32(b: &mut Bencher) {
+ b.iter(|| (1u32..1000).fold(BigUint::one(), Mul::mul));
+}
+
+// The division test is inspired by this blog comparison:
+// <https://tiehuis.github.io/big-integers-in-zig#division-test-single-limb>
+
+#[bench]
+fn factorial_div_biguint(b: &mut Bencher) {
+ let n: BigUint = (1u32..1000).fold(BigUint::one(), Mul::mul);
+ b.iter(|| {
+ (1u32..1000)
+ .rev()
+ .map(BigUint::from)
+ .fold(n.clone(), Div::div)
+ });
+}
+
+#[bench]
+fn factorial_div_u32(b: &mut Bencher) {
+ let n: BigUint = (1u32..1000).fold(BigUint::one(), Mul::mul);
+ b.iter(|| (1u32..1000).rev().fold(n.clone(), Div::div));
+}
diff --git a/benches/gcd.rs b/benches/gcd.rs
new file mode 100644
index 0000000..c211b6e
--- /dev/null
+++ b/benches/gcd.rs
@@ -0,0 +1,76 @@
+#![feature(test)]
+#![cfg(feature = "rand")]
+
+extern crate test;
+
+use num_bigint::{BigUint, RandBigInt};
+use num_integer::Integer;
+use num_traits::Zero;
+use test::Bencher;
+
+mod rng;
+use rng::get_rng;
+
+fn bench(b: &mut Bencher, bits: u64, gcd: fn(&BigUint, &BigUint) -> BigUint) {
+ let mut rng = get_rng();
+ let x = rng.gen_biguint(bits);
+ let y = rng.gen_biguint(bits);
+
+ assert_eq!(euclid(&x, &y), x.gcd(&y));
+
+ b.iter(|| gcd(&x, &y));
+}
+
+fn euclid(x: &BigUint, y: &BigUint) -> BigUint {
+ // Use Euclid's algorithm
+ let mut m = x.clone();
+ let mut n = y.clone();
+ while !m.is_zero() {
+ let temp = m;
+ m = n % &temp;
+ n = temp;
+ }
+ n
+}
+
+#[bench]
+fn gcd_euclid_0064(b: &mut Bencher) {
+ bench(b, 64, euclid);
+}
+
+#[bench]
+fn gcd_euclid_0256(b: &mut Bencher) {
+ bench(b, 256, euclid);
+}
+
+#[bench]
+fn gcd_euclid_1024(b: &mut Bencher) {
+ bench(b, 1024, euclid);
+}
+
+#[bench]
+fn gcd_euclid_4096(b: &mut Bencher) {
+ bench(b, 4096, euclid);
+}
+
+// Integer for BigUint now uses Stein for gcd
+
+#[bench]
+fn gcd_stein_0064(b: &mut Bencher) {
+ bench(b, 64, BigUint::gcd);
+}
+
+#[bench]
+fn gcd_stein_0256(b: &mut Bencher) {
+ bench(b, 256, BigUint::gcd);
+}
+
+#[bench]
+fn gcd_stein_1024(b: &mut Bencher) {
+ bench(b, 1024, BigUint::gcd);
+}
+
+#[bench]
+fn gcd_stein_4096(b: &mut Bencher) {
+ bench(b, 4096, BigUint::gcd);
+}
diff --git a/benches/rng/mod.rs b/benches/rng/mod.rs
new file mode 100644
index 0000000..33e4f0f
--- /dev/null
+++ b/benches/rng/mod.rs
@@ -0,0 +1,38 @@
+use rand::RngCore;
+
+pub(crate) fn get_rng() -> impl RngCore {
+ XorShiftStar {
+ a: 0x0123_4567_89AB_CDEF,
+ }
+}
+
+/// Simple `Rng` for benchmarking without additional dependencies
+struct XorShiftStar {
+ a: u64,
+}
+
+impl RngCore for XorShiftStar {
+ fn next_u32(&mut self) -> u32 {
+ self.next_u64() as u32
+ }
+
+ fn next_u64(&mut self) -> u64 {
+ // https://en.wikipedia.org/wiki/Xorshift#xorshift*
+ self.a ^= self.a >> 12;
+ self.a ^= self.a << 25;
+ self.a ^= self.a >> 27;
+ self.a.wrapping_mul(0x2545_F491_4F6C_DD1D)
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ for chunk in dest.chunks_mut(8) {
+ let bytes = self.next_u64().to_le_bytes();
+ let slice = &bytes[..chunk.len()];
+ chunk.copy_from_slice(slice)
+ }
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand::Error> {
+ Ok(self.fill_bytes(dest))
+ }
+}
diff --git a/benches/roots.rs b/benches/roots.rs
new file mode 100644
index 0000000..7afc4f7
--- /dev/null
+++ b/benches/roots.rs
@@ -0,0 +1,166 @@
+#![feature(test)]
+#![cfg(feature = "rand")]
+
+extern crate test;
+
+use num_bigint::{BigUint, RandBigInt};
+use test::Bencher;
+
+mod rng;
+use rng::get_rng;
+
+// The `big64` cases demonstrate the speed of cases where the value
+// can be converted to a `u64` primitive for faster calculation.
+//
+// The `big1k` cases demonstrate those that can convert to `f64` for
+// a better initial guess of the actual value.
+//
+// The `big2k` and `big4k` cases are too big for `f64`, and use a simpler guess.
+
+fn check(x: &BigUint, n: u32) {
+ let root = x.nth_root(n);
+ if n == 2 {
+ assert_eq!(root, x.sqrt())
+ } else if n == 3 {
+ assert_eq!(root, x.cbrt())
+ }
+
+ let lo = root.pow(n);
+ assert!(lo <= *x);
+ assert_eq!(lo.nth_root(n), root);
+ assert_eq!((&lo - 1u32).nth_root(n), &root - 1u32);
+
+ let hi = (&root + 1u32).pow(n);
+ assert!(hi > *x);
+ assert_eq!(hi.nth_root(n), &root + 1u32);
+ assert_eq!((&hi - 1u32).nth_root(n), root);
+}
+
+fn bench_sqrt(b: &mut Bencher, bits: u64) {
+ let x = get_rng().gen_biguint(bits);
+ eprintln!("bench_sqrt({})", x);
+
+ check(&x, 2);
+ b.iter(|| x.sqrt());
+}
+
+#[bench]
+fn big64_sqrt(b: &mut Bencher) {
+ bench_sqrt(b, 64);
+}
+
+#[bench]
+fn big1k_sqrt(b: &mut Bencher) {
+ bench_sqrt(b, 1024);
+}
+
+#[bench]
+fn big2k_sqrt(b: &mut Bencher) {
+ bench_sqrt(b, 2048);
+}
+
+#[bench]
+fn big4k_sqrt(b: &mut Bencher) {
+ bench_sqrt(b, 4096);
+}
+
+fn bench_cbrt(b: &mut Bencher, bits: u64) {
+ let x = get_rng().gen_biguint(bits);
+ eprintln!("bench_cbrt({})", x);
+
+ check(&x, 3);
+ b.iter(|| x.cbrt());
+}
+
+#[bench]
+fn big64_cbrt(b: &mut Bencher) {
+ bench_cbrt(b, 64);
+}
+
+#[bench]
+fn big1k_cbrt(b: &mut Bencher) {
+ bench_cbrt(b, 1024);
+}
+
+#[bench]
+fn big2k_cbrt(b: &mut Bencher) {
+ bench_cbrt(b, 2048);
+}
+
+#[bench]
+fn big4k_cbrt(b: &mut Bencher) {
+ bench_cbrt(b, 4096);
+}
+
+fn bench_nth_root(b: &mut Bencher, bits: u64, n: u32) {
+ let x = get_rng().gen_biguint(bits);
+ eprintln!("bench_{}th_root({})", n, x);
+
+ check(&x, n);
+ b.iter(|| x.nth_root(n));
+}
+
+#[bench]
+fn big64_nth_10(b: &mut Bencher) {
+ bench_nth_root(b, 64, 10);
+}
+
+#[bench]
+fn big1k_nth_10(b: &mut Bencher) {
+ bench_nth_root(b, 1024, 10);
+}
+
+#[bench]
+fn big1k_nth_100(b: &mut Bencher) {
+ bench_nth_root(b, 1024, 100);
+}
+
+#[bench]
+fn big1k_nth_1000(b: &mut Bencher) {
+ bench_nth_root(b, 1024, 1000);
+}
+
+#[bench]
+fn big1k_nth_10000(b: &mut Bencher) {
+ bench_nth_root(b, 1024, 10000);
+}
+
+#[bench]
+fn big2k_nth_10(b: &mut Bencher) {
+ bench_nth_root(b, 2048, 10);
+}
+
+#[bench]
+fn big2k_nth_100(b: &mut Bencher) {
+ bench_nth_root(b, 2048, 100);
+}
+
+#[bench]
+fn big2k_nth_1000(b: &mut Bencher) {
+ bench_nth_root(b, 2048, 1000);
+}
+
+#[bench]
+fn big2k_nth_10000(b: &mut Bencher) {
+ bench_nth_root(b, 2048, 10000);
+}
+
+#[bench]
+fn big4k_nth_10(b: &mut Bencher) {
+ bench_nth_root(b, 4096, 10);
+}
+
+#[bench]
+fn big4k_nth_100(b: &mut Bencher) {
+ bench_nth_root(b, 4096, 100);
+}
+
+#[bench]
+fn big4k_nth_1000(b: &mut Bencher) {
+ bench_nth_root(b, 4096, 1000);
+}
+
+#[bench]
+fn big4k_nth_10000(b: &mut Bencher) {
+ bench_nth_root(b, 4096, 10000);
+}
diff --git a/benches/shootout-pidigits.rs b/benches/shootout-pidigits.rs
new file mode 100644
index 0000000..b95d42c
--- /dev/null
+++ b/benches/shootout-pidigits.rs
@@ -0,0 +1,138 @@
+// The Computer Language Benchmarks Game
+// http://benchmarksgame.alioth.debian.org/
+//
+// contributed by the Rust Project Developers
+
+// Copyright (c) 2013-2014 The Rust Project Developers
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// - Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in
+// the documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of "The Computer Language Benchmarks Game" nor
+// the name of "The Computer Language Shootout Benchmarks" nor the
+// names of its contributors may be used to endorse or promote
+// products derived from this software without specific prior
+// written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+use std::io;
+use std::str::FromStr;
+
+use num_bigint::BigInt;
+use num_integer::Integer;
+use num_traits::{FromPrimitive, One, ToPrimitive, Zero};
+
+struct Context {
+ numer: BigInt,
+ accum: BigInt,
+ denom: BigInt,
+}
+
+impl Context {
+ fn new() -> Context {
+ Context {
+ numer: One::one(),
+ accum: Zero::zero(),
+ denom: One::one(),
+ }
+ }
+
+ fn from_i32(i: i32) -> BigInt {
+ FromPrimitive::from_i32(i).unwrap()
+ }
+
+ fn extract_digit(&self) -> i32 {
+ if self.numer > self.accum {
+ return -1;
+ }
+ let (q, r) = (&self.numer * Context::from_i32(3) + &self.accum).div_rem(&self.denom);
+ if r + &self.numer >= self.denom {
+ return -1;
+ }
+ q.to_i32().unwrap()
+ }
+
+ fn next_term(&mut self, k: i32) {
+ let y2 = Context::from_i32(k * 2 + 1);
+ self.accum = (&self.accum + (&self.numer << 1)) * &y2;
+ self.numer = &self.numer * Context::from_i32(k);
+ self.denom = &self.denom * y2;
+ }
+
+ fn eliminate_digit(&mut self, d: i32) {
+ let d = Context::from_i32(d);
+ let ten = Context::from_i32(10);
+ self.accum = (&self.accum - &self.denom * d) * &ten;
+ self.numer = &self.numer * ten;
+ }
+}
+
+fn pidigits(n: isize, out: &mut dyn io::Write) -> io::Result<()> {
+ let mut k = 0;
+ let mut context = Context::new();
+
+ for i in 1..=n {
+ let mut d;
+ loop {
+ k += 1;
+ context.next_term(k);
+ d = context.extract_digit();
+ if d != -1 {
+ break;
+ }
+ }
+
+ write!(out, "{}", d)?;
+ if i % 10 == 0 {
+ writeln!(out, "\t:{}", i)?;
+ }
+
+ context.eliminate_digit(d);
+ }
+
+ let m = n % 10;
+ if m != 0 {
+ for _ in m..10 {
+ write!(out, " ")?;
+ }
+ writeln!(out, "\t:{}", n)?;
+ }
+ Ok(())
+}
+
+const DEFAULT_DIGITS: isize = 512;
+
+fn main() {
+ let args = std::env::args().collect::<Vec<_>>();
+ let n = if args.len() < 2 {
+ DEFAULT_DIGITS
+ } else if args[1] == "--bench" {
+ return pidigits(DEFAULT_DIGITS, &mut std::io::sink()).unwrap();
+ } else {
+ FromStr::from_str(&args[1]).unwrap()
+ };
+ pidigits(n, &mut std::io::stdout()).unwrap();
+}
diff --git a/build.rs b/build.rs
new file mode 100644
index 0000000..3daed5e
--- /dev/null
+++ b/build.rs
@@ -0,0 +1,88 @@
+use std::env;
+use std::error::Error;
+use std::fs::File;
+use std::io::Write;
+use std::path::Path;
+
+fn main() {
+ let pointer_width = env::var("CARGO_CFG_TARGET_POINTER_WIDTH");
+ let u64_digit = pointer_width.as_ref().map(String::as_str) == Ok("64");
+ if u64_digit {
+ autocfg::emit("u64_digit");
+ }
+ let ac = autocfg::new();
+ let std = if ac.probe_sysroot_crate("std") {
+ "std"
+ } else {
+ "core"
+ };
+ if ac.probe_path(&format!("{}::convert::TryFrom", std)) {
+ autocfg::emit("has_try_from");
+ }
+
+ if let Ok(target_arch) = env::var("CARGO_CFG_TARGET_ARCH") {
+ if target_arch == "x86_64" || target_arch == "x86" {
+ let digit = if u64_digit { "u64" } else { "u32" };
+
+ let addcarry = format!("{}::arch::{}::_addcarry_{}", std, target_arch, digit);
+ if ac.probe_path(&addcarry) {
+ autocfg::emit("use_addcarry");
+ }
+ }
+ }
+
+ autocfg::rerun_path("build.rs");
+
+ write_radix_bases().unwrap();
+}
+
+/// Write tables of the greatest power of each radix for the given bit size. These are returned
+/// from `biguint::get_radix_base` to batch the multiplication/division of radix conversions on
+/// full `BigUint` values, operating on primitive integers as much as possible.
+///
+/// e.g. BASES_16[3] = (59049, 10) // 3¹⁰ fits in u16, but 3¹¹ is too big
+/// BASES_32[3] = (3486784401, 20)
+/// BASES_64[3] = (12157665459056928801, 40)
+///
+/// Powers of two are not included, just zeroed, as they're implemented with shifts.
+fn write_radix_bases() -> Result<(), Box<dyn Error>> {
+ let out_dir = env::var("OUT_DIR")?;
+ let dest_path = Path::new(&out_dir).join("radix_bases.rs");
+ let mut f = File::create(&dest_path)?;
+
+ for &bits in &[16, 32, 64] {
+ let max = if bits < 64 {
+ (1 << bits) - 1
+ } else {
+ std::u64::MAX
+ };
+
+ writeln!(f, "#[deny(overflowing_literals)]")?;
+ writeln!(
+ f,
+ "pub(crate) static BASES_{bits}: [(u{bits}, usize); 257] = [",
+ bits = bits
+ )?;
+ for radix in 0u64..257 {
+ let (base, power) = if radix == 0 || radix.is_power_of_two() {
+ (0, 0)
+ } else {
+ let mut power = 1;
+ let mut base = radix;
+
+ while let Some(b) = base.checked_mul(radix) {
+ if b > max {
+ break;
+ }
+ base = b;
+ power += 1;
+ }
+ (base, power)
+ };
+ writeln!(f, " ({}, {}), // {}", base, power, radix)?;
+ }
+ writeln!(f, "];")?;
+ }
+
+ Ok(())
+}
diff --git a/src/bigint.rs b/src/bigint.rs
new file mode 100644
index 0000000..891eeb4
--- /dev/null
+++ b/src/bigint.rs
@@ -0,0 +1,1147 @@
+// `Add`/`Sub` ops may flip from `BigInt` to its `BigUint` magnitude
+#![allow(clippy::suspicious_arithmetic_impl)]
+
+use crate::std_alloc::{String, Vec};
+use core::cmp::Ordering::{self, Equal};
+use core::default::Default;
+use core::fmt;
+use core::hash;
+use core::ops::{Neg, Not};
+use core::str;
+use core::{i128, u128};
+use core::{i64, u64};
+
+use num_integer::{Integer, Roots};
+use num_traits::{Num, One, Pow, Signed, Zero};
+
+use self::Sign::{Minus, NoSign, Plus};
+
+use crate::big_digit::BigDigit;
+use crate::biguint::to_str_radix_reversed;
+use crate::biguint::{BigUint, IntDigits, U32Digits, U64Digits};
+
+mod addition;
+mod division;
+mod multiplication;
+mod subtraction;
+
+mod bits;
+mod convert;
+mod power;
+mod shift;
+
+#[cfg(any(feature = "quickcheck", feature = "arbitrary"))]
+mod arbitrary;
+
+#[cfg(feature = "serde")]
+mod serde;
+
+/// A Sign is a `BigInt`'s composing element.
+#[derive(PartialEq, PartialOrd, Eq, Ord, Copy, Clone, Debug, Hash)]
+pub enum Sign {
+ Minus,
+ NoSign,
+ Plus,
+}
+
+impl Neg for Sign {
+ type Output = Sign;
+
+ /// Negate Sign value.
+ #[inline]
+ fn neg(self) -> Sign {
+ match self {
+ Minus => Plus,
+ NoSign => NoSign,
+ Plus => Minus,
+ }
+ }
+}
+
+/// A big signed integer type.
+pub struct BigInt {
+ sign: Sign,
+ data: BigUint,
+}
+
+// Note: derived `Clone` doesn't specialize `clone_from`,
+// but we want to keep the allocation in `data`.
+impl Clone for BigInt {
+ #[inline]
+ fn clone(&self) -> Self {
+ BigInt {
+ sign: self.sign,
+ data: self.data.clone(),
+ }
+ }
+
+ #[inline]
+ fn clone_from(&mut self, other: &Self) {
+ self.sign = other.sign;
+ self.data.clone_from(&other.data);
+ }
+}
+
+impl hash::Hash for BigInt {
+ #[inline]
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ debug_assert!((self.sign != NoSign) ^ self.data.is_zero());
+ self.sign.hash(state);
+ if self.sign != NoSign {
+ self.data.hash(state);
+ }
+ }
+}
+
+impl PartialEq for BigInt {
+ #[inline]
+ fn eq(&self, other: &BigInt) -> bool {
+ debug_assert!((self.sign != NoSign) ^ self.data.is_zero());
+ debug_assert!((other.sign != NoSign) ^ other.data.is_zero());
+ self.sign == other.sign && (self.sign == NoSign || self.data == other.data)
+ }
+}
+
+impl Eq for BigInt {}
+
+impl PartialOrd for BigInt {
+ #[inline]
+ fn partial_cmp(&self, other: &BigInt) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Ord for BigInt {
+ #[inline]
+ fn cmp(&self, other: &BigInt) -> Ordering {
+ debug_assert!((self.sign != NoSign) ^ self.data.is_zero());
+ debug_assert!((other.sign != NoSign) ^ other.data.is_zero());
+ let scmp = self.sign.cmp(&other.sign);
+ if scmp != Equal {
+ return scmp;
+ }
+
+ match self.sign {
+ NoSign => Equal,
+ Plus => self.data.cmp(&other.data),
+ Minus => other.data.cmp(&self.data),
+ }
+ }
+}
+
+impl Default for BigInt {
+ #[inline]
+ fn default() -> BigInt {
+ Zero::zero()
+ }
+}
+
+impl fmt::Debug for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, f)
+ }
+}
+
+impl fmt::Display for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad_integral(!self.is_negative(), "", &self.data.to_str_radix(10))
+ }
+}
+
+impl fmt::Binary for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad_integral(!self.is_negative(), "0b", &self.data.to_str_radix(2))
+ }
+}
+
+impl fmt::Octal for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad_integral(!self.is_negative(), "0o", &self.data.to_str_radix(8))
+ }
+}
+
+impl fmt::LowerHex for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad_integral(!self.is_negative(), "0x", &self.data.to_str_radix(16))
+ }
+}
+
+impl fmt::UpperHex for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut s = self.data.to_str_radix(16);
+ s.make_ascii_uppercase();
+ f.pad_integral(!self.is_negative(), "0x", &s)
+ }
+}
+
+// !-2 = !...f fe = ...0 01 = +1
+// !-1 = !...f ff = ...0 00 = 0
+// ! 0 = !...0 00 = ...f ff = -1
+// !+1 = !...0 01 = ...f fe = -2
+impl Not for BigInt {
+ type Output = BigInt;
+
+ fn not(mut self) -> BigInt {
+ match self.sign {
+ NoSign | Plus => {
+ self.data += 1u32;
+ self.sign = Minus;
+ }
+ Minus => {
+ self.data -= 1u32;
+ self.sign = if self.data.is_zero() { NoSign } else { Plus };
+ }
+ }
+ self
+ }
+}
+
+impl<'a> Not for &'a BigInt {
+ type Output = BigInt;
+
+ fn not(self) -> BigInt {
+ match self.sign {
+ NoSign => -BigInt::one(),
+ Plus => -BigInt::from(&self.data + 1u32),
+ Minus => BigInt::from(&self.data - 1u32),
+ }
+ }
+}
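+
+// Illustrative sketch of the two's-complement identity used above, `!x == -(x + 1)`,
+// using the `From<i32>` conversions defined in this crate:
+//
+//     assert_eq!(!BigInt::from(0), BigInt::from(-1));
+//     assert_eq!(!BigInt::from(1), BigInt::from(-2));
+//     assert_eq!(!BigInt::from(-1), BigInt::from(0));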
+
+impl Zero for BigInt {
+ #[inline]
+ fn zero() -> BigInt {
+ BigInt {
+ sign: NoSign,
+ data: BigUint::zero(),
+ }
+ }
+
+ #[inline]
+ fn set_zero(&mut self) {
+ self.data.set_zero();
+ self.sign = NoSign;
+ }
+
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.sign == NoSign
+ }
+}
+
+impl One for BigInt {
+ #[inline]
+ fn one() -> BigInt {
+ BigInt {
+ sign: Plus,
+ data: BigUint::one(),
+ }
+ }
+
+ #[inline]
+ fn set_one(&mut self) {
+ self.data.set_one();
+ self.sign = Plus;
+ }
+
+ #[inline]
+ fn is_one(&self) -> bool {
+ self.sign == Plus && self.data.is_one()
+ }
+}
+
+impl Signed for BigInt {
+ #[inline]
+ fn abs(&self) -> BigInt {
+ match self.sign {
+ Plus | NoSign => self.clone(),
+ Minus => BigInt::from(self.data.clone()),
+ }
+ }
+
+ #[inline]
+ fn abs_sub(&self, other: &BigInt) -> BigInt {
+ if *self <= *other {
+ Zero::zero()
+ } else {
+ self - other
+ }
+ }
+
+ #[inline]
+ fn signum(&self) -> BigInt {
+ match self.sign {
+ Plus => BigInt::one(),
+ Minus => -BigInt::one(),
+ NoSign => BigInt::zero(),
+ }
+ }
+
+ #[inline]
+ fn is_positive(&self) -> bool {
+ self.sign == Plus
+ }
+
+ #[inline]
+ fn is_negative(&self) -> bool {
+ self.sign == Minus
+ }
+}
+
+trait UnsignedAbs {
+ type Unsigned;
+
+ /// A convenience method for getting the absolute value of a signed primitive as unsigned.
+ /// See also `unsigned_abs`: https://github.com/rust-lang/rust/issues/74913
+ fn uabs(self) -> Self::Unsigned;
+
+ fn checked_uabs(self) -> CheckedUnsignedAbs<Self::Unsigned>;
+}
+
+enum CheckedUnsignedAbs<T> {
+ Positive(T),
+ Negative(T),
+}
+use self::CheckedUnsignedAbs::{Negative, Positive};
+
+macro_rules! impl_unsigned_abs {
+ ($Signed:ty, $Unsigned:ty) => {
+ impl UnsignedAbs for $Signed {
+ type Unsigned = $Unsigned;
+
+ #[inline]
+ fn uabs(self) -> $Unsigned {
+ self.wrapping_abs() as $Unsigned
+ }
+
+ #[inline]
+ fn checked_uabs(self) -> CheckedUnsignedAbs<Self::Unsigned> {
+ if self >= 0 {
+ Positive(self as $Unsigned)
+ } else {
+ Negative(self.wrapping_neg() as $Unsigned)
+ }
+ }
+ }
+ };
+}
+impl_unsigned_abs!(i8, u8);
+impl_unsigned_abs!(i16, u16);
+impl_unsigned_abs!(i32, u32);
+impl_unsigned_abs!(i64, u64);
+impl_unsigned_abs!(i128, u128);
+impl_unsigned_abs!(isize, usize);
+
+impl Neg for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn neg(mut self) -> BigInt {
+ self.sign = -self.sign;
+ self
+ }
+}
+
+impl<'a> Neg for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn neg(self) -> BigInt {
+ -self.clone()
+ }
+}
+
+impl Integer for BigInt {
+ #[inline]
+ fn div_rem(&self, other: &BigInt) -> (BigInt, BigInt) {
+ // r.sign == self.sign
+ let (d_ui, r_ui) = self.data.div_rem(&other.data);
+ let d = BigInt::from_biguint(self.sign, d_ui);
+ let r = BigInt::from_biguint(self.sign, r_ui);
+ if other.is_negative() {
+ (-d, r)
+ } else {
+ (d, r)
+ }
+ }
+
+ #[inline]
+ fn div_floor(&self, other: &BigInt) -> BigInt {
+ let (d_ui, m) = self.data.div_mod_floor(&other.data);
+ let d = BigInt::from(d_ui);
+ match (self.sign, other.sign) {
+ (Plus, Plus) | (NoSign, Plus) | (Minus, Minus) => d,
+ (Plus, Minus) | (NoSign, Minus) | (Minus, Plus) => {
+ if m.is_zero() {
+ -d
+ } else {
+ -d - 1u32
+ }
+ }
+ (_, NoSign) => unreachable!(),
+ }
+ }
+
+ #[inline]
+ fn mod_floor(&self, other: &BigInt) -> BigInt {
+ // m.sign == other.sign
+ let m_ui = self.data.mod_floor(&other.data);
+ let m = BigInt::from_biguint(other.sign, m_ui);
+ match (self.sign, other.sign) {
+ (Plus, Plus) | (NoSign, Plus) | (Minus, Minus) => m,
+ (Plus, Minus) | (NoSign, Minus) | (Minus, Plus) => {
+ if m.is_zero() {
+ m
+ } else {
+ other - m
+ }
+ }
+ (_, NoSign) => unreachable!(),
+ }
+ }
+
+ fn div_mod_floor(&self, other: &BigInt) -> (BigInt, BigInt) {
+ // m.sign == other.sign
+ let (d_ui, m_ui) = self.data.div_mod_floor(&other.data);
+ let d = BigInt::from(d_ui);
+ let m = BigInt::from_biguint(other.sign, m_ui);
+ match (self.sign, other.sign) {
+ (Plus, Plus) | (NoSign, Plus) | (Minus, Minus) => (d, m),
+ (Plus, Minus) | (NoSign, Minus) | (Minus, Plus) => {
+ if m.is_zero() {
+ (-d, m)
+ } else {
+ (-d - 1u32, other - m)
+ }
+ }
+ (_, NoSign) => unreachable!(),
+ }
+ }
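+
+ // Illustrative contrast between truncating and flooring division for mixed
+ // signs (a sketch, using the `From<i32>` conversions): for 7 and -2 the
+ // remainder follows `self` while the floored modulus follows `other`.
+ //
+ //     let (q, r) = BigInt::from(7).div_rem(&BigInt::from(-2));       // (-3, 1)
+ //     let (d, m) = BigInt::from(7).div_mod_floor(&BigInt::from(-2)); // (-4, -1)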
+
+ #[inline]
+ fn div_ceil(&self, other: &Self) -> Self {
+ let (d_ui, m) = self.data.div_mod_floor(&other.data);
+ let d = BigInt::from(d_ui);
+ match (self.sign, other.sign) {
+ (Plus, Minus) | (NoSign, Minus) | (Minus, Plus) => -d,
+ (Plus, Plus) | (NoSign, Plus) | (Minus, Minus) => {
+ if m.is_zero() {
+ d
+ } else {
+ d + 1u32
+ }
+ }
+ (_, NoSign) => unreachable!(),
+ }
+ }
+
+ /// Calculates the Greatest Common Divisor (GCD) of the number and `other`.
+ ///
+ /// The result is always positive.
+ #[inline]
+ fn gcd(&self, other: &BigInt) -> BigInt {
+ BigInt::from(self.data.gcd(&other.data))
+ }
+
+ /// Calculates the Lowest Common Multiple (LCM) of the number and `other`.
+ #[inline]
+ fn lcm(&self, other: &BigInt) -> BigInt {
+ BigInt::from(self.data.lcm(&other.data))
+ }
+
+ /// Calculates the Greatest Common Divisor (GCD) and
+ /// Lowest Common Multiple (LCM) together.
+ #[inline]
+ fn gcd_lcm(&self, other: &BigInt) -> (BigInt, BigInt) {
+ let (gcd, lcm) = self.data.gcd_lcm(&other.data);
+ (BigInt::from(gcd), BigInt::from(lcm))
+ }
+
+ /// Greatest common divisor, least common multiple, and Bézout coefficients.
+ #[inline]
+ fn extended_gcd_lcm(&self, other: &BigInt) -> (num_integer::ExtendedGcd<BigInt>, BigInt) {
+ let egcd = self.extended_gcd(other);
+ let lcm = if egcd.gcd.is_zero() {
+ BigInt::zero()
+ } else {
+ BigInt::from(&self.data / &egcd.gcd.data * &other.data)
+ };
+ (egcd, lcm)
+ }
+
+ /// Deprecated, use `is_multiple_of` instead.
+ #[inline]
+ fn divides(&self, other: &BigInt) -> bool {
+ self.is_multiple_of(other)
+ }
+
+ /// Returns `true` if the number is a multiple of `other`.
+ #[inline]
+ fn is_multiple_of(&self, other: &BigInt) -> bool {
+ self.data.is_multiple_of(&other.data)
+ }
+
+ /// Returns `true` if the number is divisible by `2`.
+ #[inline]
+ fn is_even(&self) -> bool {
+ self.data.is_even()
+ }
+
+ /// Returns `true` if the number is not divisible by `2`.
+ #[inline]
+ fn is_odd(&self) -> bool {
+ self.data.is_odd()
+ }
+
+ /// Rounds up to nearest multiple of argument.
+ #[inline]
+ fn next_multiple_of(&self, other: &Self) -> Self {
+ let m = self.mod_floor(other);
+ if m.is_zero() {
+ self.clone()
+ } else {
+ self + (other - m)
+ }
+ }
+ /// Rounds down to nearest multiple of argument.
+ #[inline]
+ fn prev_multiple_of(&self, other: &Self) -> Self {
+ self - self.mod_floor(other)
+ }
+}
+
+impl Roots for BigInt {
+ fn nth_root(&self, n: u32) -> Self {
+ assert!(
+ !(self.is_negative() && n.is_even()),
+ "root of degree {} is imaginary",
+ n
+ );
+
+ BigInt::from_biguint(self.sign, self.data.nth_root(n))
+ }
+
+ fn sqrt(&self) -> Self {
+ assert!(!self.is_negative(), "square root is imaginary");
+
+ BigInt::from_biguint(self.sign, self.data.sqrt())
+ }
+
+ fn cbrt(&self) -> Self {
+ BigInt::from_biguint(self.sign, self.data.cbrt())
+ }
+}
+
+impl IntDigits for BigInt {
+ #[inline]
+ fn digits(&self) -> &[BigDigit] {
+ self.data.digits()
+ }
+ #[inline]
+ fn digits_mut(&mut self) -> &mut Vec<BigDigit> {
+ self.data.digits_mut()
+ }
+ #[inline]
+ fn normalize(&mut self) {
+ self.data.normalize();
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+ #[inline]
+ fn capacity(&self) -> usize {
+ self.data.capacity()
+ }
+ #[inline]
+ fn len(&self) -> usize {
+ self.data.len()
+ }
+}
+
+/// A generic trait for converting a value to a `BigInt`. This may return
+/// `None` when converting from `f32` or `f64`, and will always succeed
+/// when converting from any primitive integer type or from a `BigUint`.
+pub trait ToBigInt {
+ /// Converts the value of `self` to a `BigInt`.
+ fn to_bigint(&self) -> Option<BigInt>;
+}
+
+impl BigInt {
+ /// Creates and initializes a BigInt.
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn new(sign: Sign, digits: Vec<u32>) -> BigInt {
+ BigInt::from_biguint(sign, BigUint::new(digits))
+ }
+
+ /// Creates and initializes a `BigInt`.
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn from_biguint(mut sign: Sign, mut data: BigUint) -> BigInt {
+ if sign == NoSign {
+ data.assign_from_slice(&[]);
+ } else if data.is_zero() {
+ sign = NoSign;
+ }
+
+ BigInt { sign, data }
+ }
+
+ /// Creates and initializes a `BigInt`.
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn from_slice(sign: Sign, slice: &[u32]) -> BigInt {
+ BigInt::from_biguint(sign, BigUint::from_slice(slice))
+ }
+
+ /// Reinitializes a `BigInt`.
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn assign_from_slice(&mut self, sign: Sign, slice: &[u32]) {
+ if sign == NoSign {
+ self.set_zero();
+ } else {
+ self.data.assign_from_slice(slice);
+ self.sign = if self.data.is_zero() { NoSign } else { sign };
+ }
+ }
+
+ /// Creates and initializes a `BigInt`.
+ ///
+ /// The bytes are in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"A"),
+ /// BigInt::parse_bytes(b"65", 10).unwrap());
+ /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"AA"),
+ /// BigInt::parse_bytes(b"16705", 10).unwrap());
+ /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"AB"),
+ /// BigInt::parse_bytes(b"16706", 10).unwrap());
+ /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"Hello world!"),
+ /// BigInt::parse_bytes(b"22405534230753963835153736737", 10).unwrap());
+ /// ```
+ #[inline]
+ pub fn from_bytes_be(sign: Sign, bytes: &[u8]) -> BigInt {
+ BigInt::from_biguint(sign, BigUint::from_bytes_be(bytes))
+ }
+
+ /// Creates and initializes a `BigInt`.
+ ///
+ /// The bytes are in little-endian byte order.
+ #[inline]
+ pub fn from_bytes_le(sign: Sign, bytes: &[u8]) -> BigInt {
+ BigInt::from_biguint(sign, BigUint::from_bytes_le(bytes))
+ }
+
+ /// Creates and initializes a `BigInt` from an array of bytes in
+ /// two's complement binary representation.
+ ///
+ /// The digits are in big-endian base 2<sup>8</sup>.
+ #[inline]
+ pub fn from_signed_bytes_be(digits: &[u8]) -> BigInt {
+ convert::from_signed_bytes_be(digits)
+ }
+
+ /// Creates and initializes a `BigInt` from an array of bytes in two's complement.
+ ///
+ /// The digits are in little-endian base 2<sup>8</sup>.
+ #[inline]
+ pub fn from_signed_bytes_le(digits: &[u8]) -> BigInt {
+ convert::from_signed_bytes_le(digits)
+ }
+
+ /// Creates and initializes a `BigInt`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, ToBigInt};
+ ///
+ /// assert_eq!(BigInt::parse_bytes(b"1234", 10), ToBigInt::to_bigint(&1234));
+ /// assert_eq!(BigInt::parse_bytes(b"ABCD", 16), ToBigInt::to_bigint(&0xABCD));
+ /// assert_eq!(BigInt::parse_bytes(b"G", 16), None);
+ /// ```
+ #[inline]
+ pub fn parse_bytes(buf: &[u8], radix: u32) -> Option<BigInt> {
+ let s = str::from_utf8(buf).ok()?;
+ BigInt::from_str_radix(s, radix).ok()
+ }
+
+ /// Creates and initializes a `BigInt`. Each u8 of the input slice is
+ /// interpreted as one digit of the number
+ /// and must therefore be less than `radix`.
+ ///
+ /// The bytes are in big-endian byte order.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// let inbase190 = vec![15, 33, 125, 12, 14];
+ /// let a = BigInt::from_radix_be(Sign::Minus, &inbase190, 190).unwrap();
+ /// assert_eq!(a.to_radix_be(190), (Sign::Minus, inbase190));
+ /// ```
+ pub fn from_radix_be(sign: Sign, buf: &[u8], radix: u32) -> Option<BigInt> {
+ let u = BigUint::from_radix_be(buf, radix)?;
+ Some(BigInt::from_biguint(sign, u))
+ }
+
+ /// Creates and initializes a `BigInt`. Each u8 of the input slice is
+ /// interpreted as one digit of the number
+ /// and must therefore be less than `radix`.
+ ///
+ /// The bytes are in little-endian byte order.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// let inbase190 = vec![14, 12, 125, 33, 15];
+ /// let a = BigInt::from_radix_le(Sign::Minus, &inbase190, 190).unwrap();
+ /// assert_eq!(a.to_radix_le(190), (Sign::Minus, inbase190));
+ /// ```
+ pub fn from_radix_le(sign: Sign, buf: &[u8], radix: u32) -> Option<BigInt> {
+ let u = BigUint::from_radix_le(buf, radix)?;
+ Some(BigInt::from_biguint(sign, u))
+ }
+
+ /// Returns the sign and the byte representation of the `BigInt` in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{ToBigInt, Sign};
+ ///
+ /// let i = -1125.to_bigint().unwrap();
+ /// assert_eq!(i.to_bytes_be(), (Sign::Minus, vec![4, 101]));
+ /// ```
+ #[inline]
+ pub fn to_bytes_be(&self) -> (Sign, Vec<u8>) {
+ (self.sign, self.data.to_bytes_be())
+ }
+
+ /// Returns the sign and the byte representation of the `BigInt` in little-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{ToBigInt, Sign};
+ ///
+ /// let i = -1125.to_bigint().unwrap();
+ /// assert_eq!(i.to_bytes_le(), (Sign::Minus, vec![101, 4]));
+ /// ```
+ #[inline]
+ pub fn to_bytes_le(&self) -> (Sign, Vec<u8>) {
+ (self.sign, self.data.to_bytes_le())
+ }
+
+ /// Returns the sign and the `u32` digits representation of the `BigInt` ordered least
+ /// significant digit first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// assert_eq!(BigInt::from(-1125).to_u32_digits(), (Sign::Minus, vec![1125]));
+ /// assert_eq!(BigInt::from(4294967295u32).to_u32_digits(), (Sign::Plus, vec![4294967295]));
+ /// assert_eq!(BigInt::from(4294967296u64).to_u32_digits(), (Sign::Plus, vec![0, 1]));
+ /// assert_eq!(BigInt::from(-112500000000i64).to_u32_digits(), (Sign::Minus, vec![830850304, 26]));
+ /// assert_eq!(BigInt::from(112500000000i64).to_u32_digits(), (Sign::Plus, vec![830850304, 26]));
+ /// ```
+ #[inline]
+ pub fn to_u32_digits(&self) -> (Sign, Vec<u32>) {
+ (self.sign, self.data.to_u32_digits())
+ }
+
+ /// Returns the sign and the `u64` digits representation of the `BigInt` ordered least
+ /// significant digit first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// assert_eq!(BigInt::from(-1125).to_u64_digits(), (Sign::Minus, vec![1125]));
+ /// assert_eq!(BigInt::from(4294967295u32).to_u64_digits(), (Sign::Plus, vec![4294967295]));
+ /// assert_eq!(BigInt::from(4294967296u64).to_u64_digits(), (Sign::Plus, vec![4294967296]));
+ /// assert_eq!(BigInt::from(-112500000000i64).to_u64_digits(), (Sign::Minus, vec![112500000000]));
+ /// assert_eq!(BigInt::from(112500000000i64).to_u64_digits(), (Sign::Plus, vec![112500000000]));
+ /// assert_eq!(BigInt::from(1u128 << 64).to_u64_digits(), (Sign::Plus, vec![0, 1]));
+ /// ```
+ #[inline]
+ pub fn to_u64_digits(&self) -> (Sign, Vec<u64>) {
+ (self.sign, self.data.to_u64_digits())
+ }
+
+ /// Returns an iterator of `u32` digits representation of the `BigInt` ordered least
+ /// significant digit first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigInt;
+ ///
+ /// assert_eq!(BigInt::from(-1125).iter_u32_digits().collect::<Vec<u32>>(), vec![1125]);
+ /// assert_eq!(BigInt::from(4294967295u32).iter_u32_digits().collect::<Vec<u32>>(), vec![4294967295]);
+ /// assert_eq!(BigInt::from(4294967296u64).iter_u32_digits().collect::<Vec<u32>>(), vec![0, 1]);
+ /// assert_eq!(BigInt::from(-112500000000i64).iter_u32_digits().collect::<Vec<u32>>(), vec![830850304, 26]);
+ /// assert_eq!(BigInt::from(112500000000i64).iter_u32_digits().collect::<Vec<u32>>(), vec![830850304, 26]);
+ /// ```
+ #[inline]
+ pub fn iter_u32_digits(&self) -> U32Digits<'_> {
+ self.data.iter_u32_digits()
+ }
+
+ /// Returns an iterator of `u64` digits representation of the `BigInt` ordered least
+ /// significant digit first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigInt;
+ ///
+ /// assert_eq!(BigInt::from(-1125).iter_u64_digits().collect::<Vec<u64>>(), vec![1125u64]);
+ /// assert_eq!(BigInt::from(4294967295u32).iter_u64_digits().collect::<Vec<u64>>(), vec![4294967295u64]);
+ /// assert_eq!(BigInt::from(4294967296u64).iter_u64_digits().collect::<Vec<u64>>(), vec![4294967296u64]);
+ /// assert_eq!(BigInt::from(-112500000000i64).iter_u64_digits().collect::<Vec<u64>>(), vec![112500000000u64]);
+ /// assert_eq!(BigInt::from(112500000000i64).iter_u64_digits().collect::<Vec<u64>>(), vec![112500000000u64]);
+ /// assert_eq!(BigInt::from(1u128 << 64).iter_u64_digits().collect::<Vec<u64>>(), vec![0, 1]);
+ /// ```
+ #[inline]
+ pub fn iter_u64_digits(&self) -> U64Digits<'_> {
+ self.data.iter_u64_digits()
+ }
+
+ /// Returns the two's-complement byte representation of the `BigInt` in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::ToBigInt;
+ ///
+ /// let i = -1125.to_bigint().unwrap();
+ /// assert_eq!(i.to_signed_bytes_be(), vec![251, 155]);
+ /// ```
+ #[inline]
+ pub fn to_signed_bytes_be(&self) -> Vec<u8> {
+ convert::to_signed_bytes_be(self)
+ }
+
+ /// Returns the two's-complement byte representation of the `BigInt` in little-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::ToBigInt;
+ ///
+ /// let i = -1125.to_bigint().unwrap();
+ /// assert_eq!(i.to_signed_bytes_le(), vec![155, 251]);
+ /// ```
+ #[inline]
+ pub fn to_signed_bytes_le(&self) -> Vec<u8> {
+ convert::to_signed_bytes_le(self)
+ }
+
+ /// Returns the integer formatted as a string in the given radix.
+ /// `radix` must be in the range `2...36`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigInt;
+ ///
+ /// let i = BigInt::parse_bytes(b"ff", 16).unwrap();
+ /// assert_eq!(i.to_str_radix(16), "ff");
+ /// ```
+ #[inline]
+ pub fn to_str_radix(&self, radix: u32) -> String {
+ let mut v = to_str_radix_reversed(&self.data, radix);
+
+ if self.is_negative() {
+ v.push(b'-');
+ }
+
+ v.reverse();
+ unsafe { String::from_utf8_unchecked(v) }
+ }
+
+ /// Returns the integer in the requested base in big-endian digit order.
+ /// The output is not given in a human-readable alphabet; each `u8` is a raw
+ /// digit value in the range `0..radix`.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// assert_eq!(BigInt::from(-0xFFFFi64).to_radix_be(159),
+ /// (Sign::Minus, vec![2, 94, 27]));
+ /// // 0xFFFF = 65535 = 2*(159^2) + 94*159 + 27
+ /// ```
+ #[inline]
+ pub fn to_radix_be(&self, radix: u32) -> (Sign, Vec<u8>) {
+ (self.sign, self.data.to_radix_be(radix))
+ }
+
+ /// Returns the integer in the requested base in little-endian digit order.
+ /// The output is not given in a human-readable alphabet; each `u8` is a raw
+ /// digit value in the range `0..radix`.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// assert_eq!(BigInt::from(-0xFFFFi64).to_radix_le(159),
+ /// (Sign::Minus, vec![27, 94, 2]));
+ /// // 0xFFFF = 65535 = 27 + 94*159 + 2*(159^2)
+ /// ```
+ #[inline]
+ pub fn to_radix_le(&self, radix: u32) -> (Sign, Vec<u8>) {
+ (self.sign, self.data.to_radix_le(radix))
+ }
+
+ /// Returns the sign of the `BigInt` as a `Sign`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ /// use num_traits::Zero;
+ ///
+ /// assert_eq!(BigInt::from(1234).sign(), Sign::Plus);
+ /// assert_eq!(BigInt::from(-4321).sign(), Sign::Minus);
+ /// assert_eq!(BigInt::zero().sign(), Sign::NoSign);
+ /// ```
+ #[inline]
+ pub fn sign(&self) -> Sign {
+ self.sign
+ }
+
+ /// Returns the magnitude of the `BigInt` as a `BigUint`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, BigUint};
+ /// use num_traits::Zero;
+ ///
+ /// assert_eq!(BigInt::from(1234).magnitude(), &BigUint::from(1234u32));
+ /// assert_eq!(BigInt::from(-4321).magnitude(), &BigUint::from(4321u32));
+ /// assert!(BigInt::zero().magnitude().is_zero());
+ /// ```
+ #[inline]
+ pub fn magnitude(&self) -> &BigUint {
+ &self.data
+ }
+
+ /// Convert this `BigInt` into its `Sign` and `BigUint` magnitude,
+ /// the reverse of `BigInt::from_biguint`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, BigUint, Sign};
+ /// use num_traits::Zero;
+ ///
+ /// assert_eq!(BigInt::from(1234).into_parts(), (Sign::Plus, BigUint::from(1234u32)));
+ /// assert_eq!(BigInt::from(-4321).into_parts(), (Sign::Minus, BigUint::from(4321u32)));
+ /// assert_eq!(BigInt::zero().into_parts(), (Sign::NoSign, BigUint::zero()));
+ /// ```
+ #[inline]
+ pub fn into_parts(self) -> (Sign, BigUint) {
+ (self.sign, self.data)
+ }
+
+ /// Determines the fewest bits necessary to express the `BigInt`,
+ /// not including the sign.
+ #[inline]
+ pub fn bits(&self) -> u64 {
+ self.data.bits()
+ }
+
+ /// Converts this `BigInt` into a `BigUint`, if it's not negative.
+ #[inline]
+ pub fn to_biguint(&self) -> Option<BigUint> {
+ match self.sign {
+ Plus => Some(self.data.clone()),
+ NoSign => Some(Zero::zero()),
+ Minus => None,
+ }
+ }
+
+ #[inline]
+ pub fn checked_add(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self + v)
+ }
+
+ #[inline]
+ pub fn checked_sub(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self - v)
+ }
+
+ #[inline]
+ pub fn checked_mul(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self * v)
+ }
+
+ #[inline]
+ pub fn checked_div(&self, v: &BigInt) -> Option<BigInt> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self / v)
+ }
+
+ /// Returns `self ^ exponent`.
+ pub fn pow(&self, exponent: u32) -> Self {
+ Pow::pow(self, exponent)
+ }
+
+ /// Returns `(self ^ exponent) mod modulus`
+ ///
+ /// Note that this rounds like `mod_floor`, not like the `%` operator,
+ /// which makes a difference when given a negative `self` or `modulus`.
+ /// The result will be in the interval `[0, modulus)` for `modulus > 0`,
+ /// or in the interval `(modulus, 0]` for `modulus < 0`
+ ///
+ /// Panics if the exponent is negative or the modulus is zero.
+ pub fn modpow(&self, exponent: &Self, modulus: &Self) -> Self {
+ power::modpow(self, exponent, modulus)
+ }
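+
+ // Illustrative sketch of the `mod_floor`-style rounding described above: with
+ // a negative base, the result still lands in `[0, modulus)` for a positive
+ // modulus, e.g. (-4)^3 = -64 and (-64).mod_floor(5) = 1, whereas `%` would
+ // give -4.
+ //
+ //     let r = BigInt::from(-4).modpow(&BigInt::from(3), &BigInt::from(5));
+ //     assert_eq!(r, BigInt::from(1));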
+
+ /// Returns the truncated principal square root of `self` --
+ /// see [Roots::sqrt](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#method.sqrt).
+ pub fn sqrt(&self) -> Self {
+ Roots::sqrt(self)
+ }
+
+ /// Returns the truncated principal cube root of `self` --
+ /// see [Roots::cbrt](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#method.cbrt).
+ pub fn cbrt(&self) -> Self {
+ Roots::cbrt(self)
+ }
+
+ /// Returns the truncated principal `n`th root of `self` --
+ /// See [Roots::nth_root](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#tymethod.nth_root).
+ pub fn nth_root(&self, n: u32) -> Self {
+ Roots::nth_root(self, n)
+ }
+
+ /// Returns the number of least-significant bits that are zero,
+ /// or `None` if the entire number is zero.
+ pub fn trailing_zeros(&self) -> Option<u64> {
+ self.data.trailing_zeros()
+ }
+
+ /// Returns whether the bit in position `bit` is set,
+ /// using the two's complement for negative numbers
+ pub fn bit(&self, bit: u64) -> bool {
+ if self.is_negative() {
+ // Let the binary representation of a number be
+ // ... 0 x 1 0 ... 0
+ // Then the two's complement is
+ // ... 1 !x 1 0 ... 0
+ // where !x is obtained from x by flipping each bit
+ if bit >= u64::from(crate::big_digit::BITS) * self.len() as u64 {
+ true
+ } else {
+ let trailing_zeros = self.data.trailing_zeros().unwrap();
+ match Ord::cmp(&bit, &trailing_zeros) {
+ Ordering::Less => false,
+ Ordering::Equal => true,
+ Ordering::Greater => !self.data.bit(bit),
+ }
+ }
+ } else {
+ self.data.bit(bit)
+ }
+ }
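+
+ // Illustrative sketch of two's-complement bit access on a negative value: -4 is
+ // ...11100 in two's complement, so bits 0 and 1 are clear and every bit from
+ // position 2 upwards is set.
+ //
+ //     let n = BigInt::from(-4);
+ //     assert!(!n.bit(0) && !n.bit(1) && n.bit(2) && n.bit(3) && n.bit(100));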
+
+ /// Sets or clears the bit in the given position,
+ /// using the two's complement for negative numbers
+ ///
+ /// Note that setting a bit (for positive numbers) or clearing a bit (for
+ /// negative numbers) at a position greater than the current bit length
+ /// may require a reallocation to store the new digits.
+ pub fn set_bit(&mut self, bit: u64, value: bool) {
+ match self.sign {
+ Sign::Plus => self.data.set_bit(bit, value),
+ Sign::Minus => bits::set_negative_bit(self, bit, value),
+ Sign::NoSign => {
+ if value {
+ self.data.set_bit(bit, true);
+ self.sign = Sign::Plus;
+ } else {
+ // Clearing a bit for zero is a no-op
+ }
+ }
+ }
+ // The top bit may have been cleared, so normalize
+ self.normalize();
+ }
+}
+
+#[test]
+fn test_from_biguint() {
+ fn check(inp_s: Sign, inp_n: usize, ans_s: Sign, ans_n: usize) {
+ let inp = BigInt::from_biguint(inp_s, BigUint::from(inp_n));
+ let ans = BigInt {
+ sign: ans_s,
+ data: BigUint::from(ans_n),
+ };
+ assert_eq!(inp, ans);
+ }
+ check(Plus, 1, Plus, 1);
+ check(Plus, 0, NoSign, 0);
+ check(Minus, 1, Minus, 1);
+ check(NoSign, 1, NoSign, 0);
+}
+
+#[test]
+fn test_from_slice() {
+ fn check(inp_s: Sign, inp_n: u32, ans_s: Sign, ans_n: u32) {
+ let inp = BigInt::from_slice(inp_s, &[inp_n]);
+ let ans = BigInt {
+ sign: ans_s,
+ data: BigUint::from(ans_n),
+ };
+ assert_eq!(inp, ans);
+ }
+ check(Plus, 1, Plus, 1);
+ check(Plus, 0, NoSign, 0);
+ check(Minus, 1, Minus, 1);
+ check(NoSign, 1, NoSign, 0);
+}
+
+#[test]
+fn test_assign_from_slice() {
+ fn check(inp_s: Sign, inp_n: u32, ans_s: Sign, ans_n: u32) {
+ let mut inp = BigInt::from_slice(Minus, &[2627_u32, 0_u32, 9182_u32, 42_u32]);
+ inp.assign_from_slice(inp_s, &[inp_n]);
+ let ans = BigInt {
+ sign: ans_s,
+ data: BigUint::from(ans_n),
+ };
+ assert_eq!(inp, ans);
+ }
+ check(Plus, 1, Plus, 1);
+ check(Plus, 0, NoSign, 0);
+ check(Minus, 1, Minus, 1);
+ check(NoSign, 1, NoSign, 0);
+}
diff --git a/src/bigint/addition.rs b/src/bigint/addition.rs
new file mode 100644
index 0000000..b999f62
--- /dev/null
+++ b/src/bigint/addition.rs
@@ -0,0 +1,239 @@
+use super::CheckedUnsignedAbs::{Negative, Positive};
+use super::Sign::{Minus, NoSign, Plus};
+use super::{BigInt, UnsignedAbs};
+
+use crate::{IsizePromotion, UsizePromotion};
+
+use core::cmp::Ordering::{Equal, Greater, Less};
+use core::iter::Sum;
+use core::mem;
+use core::ops::{Add, AddAssign};
+use num_traits::{CheckedAdd, Zero};
+
+// We want to forward to BigUint::add, but it's not clear how that will go until
+// we compare both sign and magnitude. So we duplicate this body for every
+// val/ref combination, deferring that decision to BigUint's own forwarding.
+macro_rules! bigint_add {
+ ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => {
+ match ($a.sign, $b.sign) {
+ (_, NoSign) => $a_owned,
+ (NoSign, _) => $b_owned,
+ // same sign => keep the sign with the sum of magnitudes
+ (Plus, Plus) | (Minus, Minus) => BigInt::from_biguint($a.sign, $a_data + $b_data),
+ // opposite signs => keep the sign of the larger with the difference of magnitudes
+ (Plus, Minus) | (Minus, Plus) => match $a.data.cmp(&$b.data) {
+ Less => BigInt::from_biguint($b.sign, $b_data - $a_data),
+ Greater => BigInt::from_biguint($a.sign, $a_data - $b_data),
+ Equal => Zero::zero(),
+ },
+ }
+ };
+}
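+
+// Illustrative sketch of the sign/magnitude rule encoded above: for operands of
+// opposite sign the result takes the sign of the larger magnitude, e.g.
+// 5 + (-8) compares |5| < |8|, so the answer is -(8 - 5) = -3.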
+
+impl<'a, 'b> Add<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: &BigInt) -> BigInt {
+ bigint_add!(
+ self,
+ self.clone(),
+ &self.data,
+ other,
+ other.clone(),
+ &other.data
+ )
+ }
+}
+
+impl<'a> Add<BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: BigInt) -> BigInt {
+ bigint_add!(self, self.clone(), &self.data, other, other, other.data)
+ }
+}
+
+impl<'a> Add<&'a BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: &BigInt) -> BigInt {
+ bigint_add!(self, self, self.data, other, other.clone(), &other.data)
+ }
+}
+
+impl Add<BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: BigInt) -> BigInt {
+ bigint_add!(self, self, self.data, other, other, other.data)
+ }
+}
+
+impl<'a> AddAssign<&'a BigInt> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: &BigInt) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n + other;
+ }
+}
+forward_val_assign!(impl AddAssign for BigInt, add_assign);
+
+promote_all_scalars!(impl Add for BigInt, add);
+promote_all_scalars_assign!(impl AddAssign for BigInt, add_assign);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u32> for BigInt, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u64> for BigInt, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u128> for BigInt, add);
+
+impl Add<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: u32) -> BigInt {
+ match self.sign {
+ NoSign => From::from(other),
+ Plus => BigInt::from(self.data + other),
+ Minus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Less => BigInt::from(other - self.data),
+ Greater => -BigInt::from(self.data - other),
+ },
+ }
+ }
+}
+
+impl AddAssign<u32> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: u32) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n + other;
+ }
+}
+
+impl Add<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: u64) -> BigInt {
+ match self.sign {
+ NoSign => From::from(other),
+ Plus => BigInt::from(self.data + other),
+ Minus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Less => BigInt::from(other - self.data),
+ Greater => -BigInt::from(self.data - other),
+ },
+ }
+ }
+}
+
+impl AddAssign<u64> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: u64) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n + other;
+ }
+}
+
+impl Add<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: u128) -> BigInt {
+ match self.sign {
+ NoSign => BigInt::from(other),
+ Plus => BigInt::from(self.data + other),
+ Minus => match self.data.cmp(&From::from(other)) {
+ Equal => BigInt::zero(),
+ Less => BigInt::from(other - self.data),
+ Greater => -BigInt::from(self.data - other),
+ },
+ }
+ }
+}
+impl AddAssign<u128> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: u128) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n + other;
+ }
+}
+
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<i32> for BigInt, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<i64> for BigInt, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<i128> for BigInt, add);
+
+impl Add<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: i32) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self + u,
+ Negative(u) => self - u,
+ }
+ }
+}
+impl AddAssign<i32> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: i32) {
+ match other.checked_uabs() {
+ Positive(u) => *self += u,
+ Negative(u) => *self -= u,
+ }
+ }
+}
+
+impl Add<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: i64) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self + u,
+ Negative(u) => self - u,
+ }
+ }
+}
+impl AddAssign<i64> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: i64) {
+ match other.checked_uabs() {
+ Positive(u) => *self += u,
+ Negative(u) => *self -= u,
+ }
+ }
+}
+
+impl Add<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: i128) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self + u,
+ Negative(u) => self - u,
+ }
+ }
+}
+impl AddAssign<i128> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: i128) {
+ match other.checked_uabs() {
+ Positive(u) => *self += u,
+ Negative(u) => *self -= u,
+ }
+ }
+}
+
+impl CheckedAdd for BigInt {
+ #[inline]
+ fn checked_add(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self.add(v))
+ }
+}
+
+impl_sum_iter_type!(BigInt);
diff --git a/src/bigint/arbitrary.rs b/src/bigint/arbitrary.rs
new file mode 100644
index 0000000..df66050
--- /dev/null
+++ b/src/bigint/arbitrary.rs
@@ -0,0 +1,39 @@
+use super::{BigInt, Sign};
+
+#[cfg(feature = "quickcheck")]
+use crate::std_alloc::Box;
+use crate::BigUint;
+
+#[cfg(feature = "quickcheck")]
+impl quickcheck::Arbitrary for BigInt {
+ fn arbitrary(g: &mut quickcheck::Gen) -> Self {
+ let positive = bool::arbitrary(g);
+ let sign = if positive { Sign::Plus } else { Sign::Minus };
+ Self::from_biguint(sign, BigUint::arbitrary(g))
+ }
+
+ fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
+ let sign = self.sign();
+ let unsigned_shrink = self.data.shrink();
+ Box::new(unsigned_shrink.map(move |x| BigInt::from_biguint(sign, x)))
+ }
+}
+
+#[cfg(feature = "arbitrary")]
+impl arbitrary::Arbitrary<'_> for BigInt {
+ fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
+ let positive = bool::arbitrary(u)?;
+ let sign = if positive { Sign::Plus } else { Sign::Minus };
+ Ok(Self::from_biguint(sign, BigUint::arbitrary(u)?))
+ }
+
+ fn arbitrary_take_rest(mut u: arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
+ let positive = bool::arbitrary(&mut u)?;
+ let sign = if positive { Sign::Plus } else { Sign::Minus };
+ Ok(Self::from_biguint(sign, BigUint::arbitrary_take_rest(u)?))
+ }
+
+ fn size_hint(depth: usize) -> (usize, Option<usize>) {
+ arbitrary::size_hint::and(bool::size_hint(depth), BigUint::size_hint(depth))
+ }
+}
diff --git a/src/bigint/bits.rs b/src/bigint/bits.rs
new file mode 100644
index 0000000..686def4
--- /dev/null
+++ b/src/bigint/bits.rs
@@ -0,0 +1,531 @@
+use super::BigInt;
+use super::Sign::{Minus, NoSign, Plus};
+
+use crate::big_digit::{self, BigDigit, DoubleBigDigit};
+use crate::biguint::IntDigits;
+use crate::std_alloc::Vec;
+
+use core::cmp::Ordering::{Equal, Greater, Less};
+use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign};
+use num_traits::{ToPrimitive, Zero};
+
+// Negation in two's complement.
+// acc must be initialized as 1 for least-significant digit.
+//
+// When negating, a carry (acc == 1) means that all the digits
+// considered to this point were zero. This means that if all the
+// digits of a negative BigInt have been considered, carry must be
+// zero as we cannot have negative zero.
+//
+// 01 -> ...f ff
+// ff -> ...f 01
+// 01 00 -> ...f ff 00
+// 01 01 -> ...f fe ff
+// 01 ff -> ...f fe 01
+// ff 00 -> ...f 01 00
+// ff 01 -> ...f 00 ff
+// ff ff -> ...f 00 01
+#[inline]
+fn negate_carry(a: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit {
+ *acc += DoubleBigDigit::from(!a);
+ let lo = *acc as BigDigit;
+ *acc >>= big_digit::BITS;
+ lo
+}
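+
+// Illustrative walk-through of the table above for the two-digit value 01 01
+// (processing least-significant digit first): digit 0 gives !0x01 + 1 = 0xff
+// with no carry out, digit 1 then gives !0x01 + 0 = 0xfe, producing ...f fe ff
+// as listed.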
+
+// + 1 & -ff = ...0 01 & ...f 01 = ...0 01 = + 1
+// +ff & - 1 = ...0 ff & ...f ff = ...0 ff = +ff
+// answer is pos, has length of a
+fn bitand_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_b = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai &= twos_b;
+ }
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+}
+
+// - 1 & +ff = ...f ff & ...0 ff = ...0 ff = +ff
+// -ff & + 1 = ...f 01 & ...0 01 = ...0 01 = + 1
+// answer is pos, has length of b
+fn bitand_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = twos_a & bi;
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ match Ord::cmp(&a.len(), &b.len()) {
+ Greater => a.truncate(b.len()),
+ Equal => {}
+ Less => {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().cloned());
+ }
+ }
+}
+
+// - 1 & -ff = ...f ff & ...f 01 = ...f 01 = - ff
+// -ff & - 1 = ...f 01 & ...f ff = ...f 01 = - ff
+// -ff & -fe = ...f 01 & ...f 02 = ...f 00 = -100
+// answer is neg, has length of longest with a possible carry
+fn bitand_neg_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_b = 1;
+ let mut carry_and = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = negate_carry(twos_a & twos_b, &mut carry_and);
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ match Ord::cmp(&a.len(), &b.len()) {
+ Greater => {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a, &mut carry_and);
+ }
+ debug_assert!(carry_a == 0);
+ }
+ Equal => {}
+ Less => {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ negate_carry(twos_b, &mut carry_and)
+ }));
+ debug_assert!(carry_b == 0);
+ }
+ }
+ if carry_and != 0 {
+ a.push(1);
+ }
+}
+
+forward_val_val_binop!(impl BitAnd for BigInt, bitand);
+forward_ref_val_binop!(impl BitAnd for BigInt, bitand);
+
+// do not use forward_ref_ref_binop_commutative! for bitand so that we can
+// clone as needed, avoiding over-allocation
+impl<'a, 'b> BitAnd<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitand(self, other: &BigInt) -> BigInt {
+ match (self.sign, other.sign) {
+ (NoSign, _) | (_, NoSign) => BigInt::zero(),
+ (Plus, Plus) => BigInt::from(&self.data & &other.data),
+ (Plus, Minus) => self.clone() & other,
+ (Minus, Plus) => other.clone() & self,
+ (Minus, Minus) => {
+ // forward to val-ref, choosing the larger to clone
+ if self.len() >= other.len() {
+ self.clone() & other
+ } else {
+ other.clone() & self
+ }
+ }
+ }
+ }
+}
+
+impl<'a> BitAnd<&'a BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitand(mut self, other: &BigInt) -> BigInt {
+ self &= other;
+ self
+ }
+}
+
+forward_val_assign!(impl BitAndAssign for BigInt, bitand_assign);
+
+impl<'a> BitAndAssign<&'a BigInt> for BigInt {
+ fn bitand_assign(&mut self, other: &BigInt) {
+ match (self.sign, other.sign) {
+ (NoSign, _) => {}
+ (_, NoSign) => self.set_zero(),
+ (Plus, Plus) => {
+ self.data &= &other.data;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+ (Plus, Minus) => {
+ bitand_pos_neg(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ (Minus, Plus) => {
+ bitand_neg_pos(self.digits_mut(), other.digits());
+ self.sign = Plus;
+ self.normalize();
+ }
+ (Minus, Minus) => {
+ bitand_neg_neg(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ }
+ }
+}
+
+// + 1 | -ff = ...0 01 | ...f 01 = ...f 01 = -ff
+// +ff | - 1 = ...0 ff | ...f ff = ...f ff = - 1
+// answer is neg, has length of b
+fn bitor_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_b = 1;
+ let mut carry_or = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = negate_carry(*ai | twos_b, &mut carry_or);
+ }
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ match Ord::cmp(&a.len(), &b.len()) {
+ Greater => {
+ a.truncate(b.len());
+ }
+ Equal => {}
+ Less => {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ negate_carry(twos_b, &mut carry_or)
+ }));
+ debug_assert!(carry_b == 0);
+ }
+ }
+ // for carry_or to be non-zero, we would need twos_b == 0
+ debug_assert!(carry_or == 0);
+}
+
+// - 1 | +ff = ...f ff | ...0 ff = ...f ff = - 1
+// -ff | + 1 = ...f 01 | ...0 01 = ...f 01 = -ff
+// answer is neg, has length of a
+fn bitor_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_or = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a | bi, &mut carry_or);
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ if a.len() > b.len() {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a, &mut carry_or);
+ }
+ debug_assert!(carry_a == 0);
+ }
+ // for carry_or to be non-zero, we would need twos_a == 0
+ debug_assert!(carry_or == 0);
+}
+
+// - 1 | -ff = ...f ff | ...f 01 = ...f ff = -1
+// -ff | - 1 = ...f 01 | ...f ff = ...f ff = -1
+// answer is neg, has length of shortest
+fn bitor_neg_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_b = 1;
+ let mut carry_or = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = negate_carry(twos_a | twos_b, &mut carry_or);
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ if a.len() > b.len() {
+ a.truncate(b.len());
+ }
+ // for carry_or to be non-zero, we would need twos_a == 0 or twos_b == 0
+ debug_assert!(carry_or == 0);
+}
+
+forward_val_val_binop!(impl BitOr for BigInt, bitor);
+forward_ref_val_binop!(impl BitOr for BigInt, bitor);
+
+// do not use forward_ref_ref_binop_commutative! for bitor so that we can
+// clone as needed, avoiding over-allocation
+impl<'a, 'b> BitOr<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitor(self, other: &BigInt) -> BigInt {
+ match (self.sign, other.sign) {
+ (NoSign, _) => other.clone(),
+ (_, NoSign) => self.clone(),
+ (Plus, Plus) => BigInt::from(&self.data | &other.data),
+ (Plus, Minus) => other.clone() | self,
+ (Minus, Plus) => self.clone() | other,
+ (Minus, Minus) => {
+ // forward to val-ref, choosing the smaller to clone
+ if self.len() <= other.len() {
+ self.clone() | other
+ } else {
+ other.clone() | self
+ }
+ }
+ }
+ }
+}
+
+impl<'a> BitOr<&'a BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitor(mut self, other: &BigInt) -> BigInt {
+ self |= other;
+ self
+ }
+}
+
+forward_val_assign!(impl BitOrAssign for BigInt, bitor_assign);
+
+impl<'a> BitOrAssign<&'a BigInt> for BigInt {
+ fn bitor_assign(&mut self, other: &BigInt) {
+ match (self.sign, other.sign) {
+ (_, NoSign) => {}
+ (NoSign, _) => self.clone_from(other),
+ (Plus, Plus) => self.data |= &other.data,
+ (Plus, Minus) => {
+ bitor_pos_neg(self.digits_mut(), other.digits());
+ self.sign = Minus;
+ self.normalize();
+ }
+ (Minus, Plus) => {
+ bitor_neg_pos(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ (Minus, Minus) => {
+ bitor_neg_neg(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ }
+ }
+}
+
+// + 1 ^ -ff = ...0 01 ^ ...f 01 = ...f 00 = -100
+// +ff ^ - 1 = ...0 ff ^ ...f ff = ...f 00 = -100
+// answer is neg, has length of longest with a possible carry
+fn bitxor_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_b = 1;
+ let mut carry_xor = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = negate_carry(*ai ^ twos_b, &mut carry_xor);
+ }
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ match Ord::cmp(&a.len(), &b.len()) {
+ Greater => {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_b = !0;
+ *ai = negate_carry(*ai ^ twos_b, &mut carry_xor);
+ }
+ }
+ Equal => {}
+ Less => {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ negate_carry(twos_b, &mut carry_xor)
+ }));
+ debug_assert!(carry_b == 0);
+ }
+ }
+ if carry_xor != 0 {
+ a.push(1);
+ }
+}
+
+// - 1 ^ +ff = ...f ff ^ ...0 ff = ...f 00 = -100
+// -ff ^ + 1 = ...f 01 ^ ...0 01 = ...f 00 = -100
+// answer is neg, has length of longest with a possible carry
+fn bitxor_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_xor = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a ^ bi, &mut carry_xor);
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ match Ord::cmp(&a.len(), &b.len()) {
+ Greater => {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a, &mut carry_xor);
+ }
+ debug_assert!(carry_a == 0);
+ }
+ Equal => {}
+ Less => {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_a = !0;
+ negate_carry(twos_a ^ bi, &mut carry_xor)
+ }));
+ }
+ }
+ if carry_xor != 0 {
+ a.push(1);
+ }
+}
+
+// - 1 ^ -ff = ...f ff ^ ...f 01 = ...0 fe = +fe
+// -ff ^ - 1 = ...f 01 ^ ...f ff = ...0 fe = +fe
+// answer is pos, has length of longest
+fn bitxor_neg_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_b = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = twos_a ^ twos_b;
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ match Ord::cmp(&a.len(), &b.len()) {
+ Greater => {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ let twos_b = !0;
+ *ai = twos_a ^ twos_b;
+ }
+ debug_assert!(carry_a == 0);
+ }
+ Equal => {}
+ Less => {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_a = !0;
+ let twos_b = negate_carry(bi, &mut carry_b);
+ twos_a ^ twos_b
+ }));
+ debug_assert!(carry_b == 0);
+ }
+ }
+}
+
+forward_all_binop_to_val_ref_commutative!(impl BitXor for BigInt, bitxor);
+
+impl<'a> BitXor<&'a BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitxor(mut self, other: &BigInt) -> BigInt {
+ self ^= other;
+ self
+ }
+}
+
+forward_val_assign!(impl BitXorAssign for BigInt, bitxor_assign);
+
+impl<'a> BitXorAssign<&'a BigInt> for BigInt {
+ fn bitxor_assign(&mut self, other: &BigInt) {
+ match (self.sign, other.sign) {
+ (_, NoSign) => {}
+ (NoSign, _) => self.clone_from(other),
+ (Plus, Plus) => {
+ self.data ^= &other.data;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+ (Plus, Minus) => {
+ bitxor_pos_neg(self.digits_mut(), other.digits());
+ self.sign = Minus;
+ self.normalize();
+ }
+ (Minus, Plus) => {
+ bitxor_neg_pos(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ (Minus, Minus) => {
+ bitxor_neg_neg(self.digits_mut(), other.digits());
+ self.sign = Plus;
+ self.normalize();
+ }
+ }
+ }
+}
+
+pub(super) fn set_negative_bit(x: &mut BigInt, bit: u64, value: bool) {
+ debug_assert_eq!(x.sign, Minus);
+ let data = &mut x.data;
+
+ let bits_per_digit = u64::from(big_digit::BITS);
+ if bit >= bits_per_digit * data.len() as u64 {
+ if !value {
+ data.set_bit(bit, true);
+ }
+ } else {
+ // If the Uint number is
+ // ... 0 x 1 0 ... 0
+ // then the two's complement is
+ // ... 1 !x 1 0 ... 0
+ // |-- bit at position 'trailing_zeros'
+ // where !x is obtained from x by flipping each bit
+ let trailing_zeros = data.trailing_zeros().unwrap();
+ if bit > trailing_zeros {
+ data.set_bit(bit, !value);
+ } else if bit == trailing_zeros && !value {
+ // Clearing the bit at position `trailing_zeros` is handled similarly to
+ // what `bitand_neg_pos` does, except that we start at digit
+ // `bit_index`. All digits below `bit_index` are guaranteed to be zero,
+ // so initially we have `carry_in` = `carry_out` = 1. Furthermore, we
+ // stop traversing the digits when there are no more carries.
+ let bit_index = (bit / bits_per_digit).to_usize().unwrap();
+ let bit_mask = (1 as BigDigit) << (bit % bits_per_digit);
+ let mut digit_iter = data.digits_mut().iter_mut().skip(bit_index);
+ let mut carry_in = 1;
+ let mut carry_out = 1;
+
+ let digit = digit_iter.next().unwrap();
+ let twos_in = negate_carry(*digit, &mut carry_in);
+ let twos_out = twos_in & !bit_mask;
+ *digit = negate_carry(twos_out, &mut carry_out);
+
+ for digit in digit_iter {
+ if carry_in == 0 && carry_out == 0 {
+ // Exit the loop since no more digits can change
+ break;
+ }
+ let twos = negate_carry(*digit, &mut carry_in);
+ *digit = negate_carry(twos, &mut carry_out);
+ }
+
+ if carry_out != 0 {
+ // All digits have been traversed and there is a carry
+ debug_assert_eq!(carry_in, 0);
+ data.digits_mut().push(1);
+ }
+ } else if bit < trailing_zeros && value {
+ // Flip each bit from position 'bit' to 'trailing_zeros', both inclusive
+ // ... 1 !x 1 0 ... 0 ... 0
+ // |-- bit at position 'bit'
+ // |-- bit at position 'trailing_zeros'
+ // bit_mask: 1 1 ... 1 0 .. 0
+ // This is done by xor'ing with the bit_mask
+ let index_lo = (bit / bits_per_digit).to_usize().unwrap();
+ let index_hi = (trailing_zeros / bits_per_digit).to_usize().unwrap();
+ let bit_mask_lo = big_digit::MAX << (bit % bits_per_digit);
+ let bit_mask_hi =
+ big_digit::MAX >> (bits_per_digit - 1 - (trailing_zeros % bits_per_digit));
+ let digits = data.digits_mut();
+
+ if index_lo == index_hi {
+ digits[index_lo] ^= bit_mask_lo & bit_mask_hi;
+ } else {
+ digits[index_lo] = bit_mask_lo;
+ for digit in &mut digits[index_lo + 1..index_hi] {
+ *digit = big_digit::MAX;
+ }
+ digits[index_hi] ^= bit_mask_hi;
+ }
+ } else {
+ // We end up here in two cases:
+ // bit == trailing_zeros && value: Bit is already set
+ // bit < trailing_zeros && !value: Bit is already cleared
+ }
+ }
+}
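+
+// Illustrative sketch of the `bit < trailing_zeros && value` branch above: for
+// x = -4 the magnitude is 0b100 with trailing_zeros = 2, so setting bit 0 flips
+// magnitude bits 0..=2 (0b100 ^ 0b111 = 0b011), giving -3, i.e. ...11101 in
+// two's complement as expected.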
diff --git a/src/bigint/convert.rs b/src/bigint/convert.rs
new file mode 100644
index 0000000..ff8e04e
--- /dev/null
+++ b/src/bigint/convert.rs
@@ -0,0 +1,469 @@
+use super::Sign::{self, Minus, NoSign, Plus};
+use super::{BigInt, ToBigInt};
+
+use crate::std_alloc::Vec;
+#[cfg(has_try_from)]
+use crate::TryFromBigIntError;
+use crate::{BigUint, ParseBigIntError, ToBigUint};
+
+use core::cmp::Ordering::{Equal, Greater, Less};
+#[cfg(has_try_from)]
+use core::convert::TryFrom;
+use core::str::{self, FromStr};
+use num_traits::{FromPrimitive, Num, ToPrimitive, Zero};
+
+impl FromStr for BigInt {
+ type Err = ParseBigIntError;
+
+ #[inline]
+ fn from_str(s: &str) -> Result<BigInt, ParseBigIntError> {
+ BigInt::from_str_radix(s, 10)
+ }
+}
+
+impl Num for BigInt {
+ type FromStrRadixErr = ParseBigIntError;
+
+ /// Creates and initializes a BigInt.
+ #[inline]
+ fn from_str_radix(mut s: &str, radix: u32) -> Result<BigInt, ParseBigIntError> {
+ let sign = if s.starts_with('-') {
+ let tail = &s[1..];
+ if !tail.starts_with('+') {
+ s = tail
+ }
+ Minus
+ } else {
+ Plus
+ };
+ let bu = BigUint::from_str_radix(s, radix)?;
+ Ok(BigInt::from_biguint(sign, bu))
+ }
+}
+
+impl ToPrimitive for BigInt {
+ #[inline]
+ fn to_i64(&self) -> Option<i64> {
+ match self.sign {
+ Plus => self.data.to_i64(),
+ NoSign => Some(0),
+ Minus => {
+ let n = self.data.to_u64()?;
+ let m: u64 = 1 << 63;
+ match n.cmp(&m) {
+ Less => Some(-(n as i64)),
+ Equal => Some(core::i64::MIN),
+ Greater => None,
+ }
+ }
+ }
+ }
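+
+ // Illustrative sketch of the boundary handling above: a negative magnitude of
+ // exactly 1 << 63 maps to `i64::MIN`, one more than that no longer fits, and
+ // anything smaller is simply negated.
+ //
+ //     assert_eq!(BigInt::from(i64::MIN).to_i64(), Some(i64::MIN));
+ //     assert_eq!((BigInt::from(i64::MIN) - 1u32).to_i64(), None);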
+
+ #[inline]
+ fn to_i128(&self) -> Option<i128> {
+ match self.sign {
+ Plus => self.data.to_i128(),
+ NoSign => Some(0),
+ Minus => {
+ let n = self.data.to_u128()?;
+ let m: u128 = 1 << 127;
+ match n.cmp(&m) {
+ Less => Some(-(n as i128)),
+ Equal => Some(core::i128::MIN),
+ Greater => None,
+ }
+ }
+ }
+ }
+
+ #[inline]
+ fn to_u64(&self) -> Option<u64> {
+ match self.sign {
+ Plus => self.data.to_u64(),
+ NoSign => Some(0),
+ Minus => None,
+ }
+ }
+
+ #[inline]
+ fn to_u128(&self) -> Option<u128> {
+ match self.sign {
+ Plus => self.data.to_u128(),
+ NoSign => Some(0),
+ Minus => None,
+ }
+ }
+
+ #[inline]
+ fn to_f32(&self) -> Option<f32> {
+ let n = self.data.to_f32()?;
+ Some(if self.sign == Minus { -n } else { n })
+ }
+
+ #[inline]
+ fn to_f64(&self) -> Option<f64> {
+ let n = self.data.to_f64()?;
+ Some(if self.sign == Minus { -n } else { n })
+ }
+}
+
+macro_rules! impl_try_from_bigint {
+ ($T:ty, $to_ty:path) => {
+ #[cfg(has_try_from)]
+ impl TryFrom<&BigInt> for $T {
+ type Error = TryFromBigIntError<()>;
+
+ #[inline]
+ fn try_from(value: &BigInt) -> Result<$T, TryFromBigIntError<()>> {
+ $to_ty(value).ok_or(TryFromBigIntError::new(()))
+ }
+ }
+
+ #[cfg(has_try_from)]
+ impl TryFrom<BigInt> for $T {
+ type Error = TryFromBigIntError<BigInt>;
+
+ #[inline]
+ fn try_from(value: BigInt) -> Result<$T, TryFromBigIntError<BigInt>> {
+ <$T>::try_from(&value).map_err(|_| TryFromBigIntError::new(value))
+ }
+ }
+ };
+}
+
+impl_try_from_bigint!(u8, ToPrimitive::to_u8);
+impl_try_from_bigint!(u16, ToPrimitive::to_u16);
+impl_try_from_bigint!(u32, ToPrimitive::to_u32);
+impl_try_from_bigint!(u64, ToPrimitive::to_u64);
+impl_try_from_bigint!(usize, ToPrimitive::to_usize);
+impl_try_from_bigint!(u128, ToPrimitive::to_u128);
+
+impl_try_from_bigint!(i8, ToPrimitive::to_i8);
+impl_try_from_bigint!(i16, ToPrimitive::to_i16);
+impl_try_from_bigint!(i32, ToPrimitive::to_i32);
+impl_try_from_bigint!(i64, ToPrimitive::to_i64);
+impl_try_from_bigint!(isize, ToPrimitive::to_isize);
+impl_try_from_bigint!(i128, ToPrimitive::to_i128);
+
+impl FromPrimitive for BigInt {
+ #[inline]
+ fn from_i64(n: i64) -> Option<BigInt> {
+ Some(BigInt::from(n))
+ }
+
+ #[inline]
+ fn from_i128(n: i128) -> Option<BigInt> {
+ Some(BigInt::from(n))
+ }
+
+ #[inline]
+ fn from_u64(n: u64) -> Option<BigInt> {
+ Some(BigInt::from(n))
+ }
+
+ #[inline]
+ fn from_u128(n: u128) -> Option<BigInt> {
+ Some(BigInt::from(n))
+ }
+
+ #[inline]
+ fn from_f64(n: f64) -> Option<BigInt> {
+ if n >= 0.0 {
+ BigUint::from_f64(n).map(BigInt::from)
+ } else {
+ let x = BigUint::from_f64(-n)?;
+ Some(-BigInt::from(x))
+ }
+ }
+}
+
+impl From<i64> for BigInt {
+ #[inline]
+ fn from(n: i64) -> Self {
+ if n >= 0 {
+ BigInt::from(n as u64)
+ } else {
+ let u = core::u64::MAX - (n as u64) + 1;
+ BigInt {
+ sign: Minus,
+ data: BigUint::from(u),
+ }
+ }
+ }
+}
+
+impl From<i128> for BigInt {
+ #[inline]
+ fn from(n: i128) -> Self {
+ if n >= 0 {
+ BigInt::from(n as u128)
+ } else {
+ let u = core::u128::MAX - (n as u128) + 1;
+ BigInt {
+ sign: Minus,
+ data: BigUint::from(u),
+ }
+ }
+ }
+}
+
+macro_rules! impl_bigint_from_int {
+ ($T:ty) => {
+ impl From<$T> for BigInt {
+ #[inline]
+ fn from(n: $T) -> Self {
+ BigInt::from(n as i64)
+ }
+ }
+ };
+}
+
+impl_bigint_from_int!(i8);
+impl_bigint_from_int!(i16);
+impl_bigint_from_int!(i32);
+impl_bigint_from_int!(isize);
+
+impl From<u64> for BigInt {
+ #[inline]
+ fn from(n: u64) -> Self {
+ if n > 0 {
+ BigInt {
+ sign: Plus,
+ data: BigUint::from(n),
+ }
+ } else {
+ BigInt::zero()
+ }
+ }
+}
+
+impl From<u128> for BigInt {
+ #[inline]
+ fn from(n: u128) -> Self {
+ if n > 0 {
+ BigInt {
+ sign: Plus,
+ data: BigUint::from(n),
+ }
+ } else {
+ BigInt::zero()
+ }
+ }
+}
+
+macro_rules! impl_bigint_from_uint {
+ ($T:ty) => {
+ impl From<$T> for BigInt {
+ #[inline]
+ fn from(n: $T) -> Self {
+ BigInt::from(n as u64)
+ }
+ }
+ };
+}
+
+impl_bigint_from_uint!(u8);
+impl_bigint_from_uint!(u16);
+impl_bigint_from_uint!(u32);
+impl_bigint_from_uint!(usize);
+
+impl From<BigUint> for BigInt {
+ #[inline]
+ fn from(n: BigUint) -> Self {
+ if n.is_zero() {
+ BigInt::zero()
+ } else {
+ BigInt {
+ sign: Plus,
+ data: n,
+ }
+ }
+ }
+}
+
+impl ToBigInt for BigInt {
+ #[inline]
+ fn to_bigint(&self) -> Option<BigInt> {
+ Some(self.clone())
+ }
+}
+
+impl ToBigInt for BigUint {
+ #[inline]
+ fn to_bigint(&self) -> Option<BigInt> {
+ if self.is_zero() {
+ Some(Zero::zero())
+ } else {
+ Some(BigInt {
+ sign: Plus,
+ data: self.clone(),
+ })
+ }
+ }
+}
+
+impl ToBigUint for BigInt {
+ #[inline]
+ fn to_biguint(&self) -> Option<BigUint> {
+ match self.sign() {
+ Plus => Some(self.data.clone()),
+ NoSign => Some(Zero::zero()),
+ Minus => None,
+ }
+ }
+}
+
+#[cfg(has_try_from)]
+impl TryFrom<&BigInt> for BigUint {
+ type Error = TryFromBigIntError<()>;
+
+ #[inline]
+ fn try_from(value: &BigInt) -> Result<BigUint, TryFromBigIntError<()>> {
+ value
+ .to_biguint()
+ .ok_or_else(|| TryFromBigIntError::new(()))
+ }
+}
+
+#[cfg(has_try_from)]
+impl TryFrom<BigInt> for BigUint {
+ type Error = TryFromBigIntError<BigInt>;
+
+ #[inline]
+ fn try_from(value: BigInt) -> Result<BigUint, TryFromBigIntError<BigInt>> {
+ if value.sign() == Sign::Minus {
+ Err(TryFromBigIntError::new(value))
+ } else {
+ Ok(value.data)
+ }
+ }
+}
+
+macro_rules! impl_to_bigint {
+ ($T:ty, $from_ty:path) => {
+ impl ToBigInt for $T {
+ #[inline]
+ fn to_bigint(&self) -> Option<BigInt> {
+ $from_ty(*self)
+ }
+ }
+ };
+}
+
+impl_to_bigint!(isize, FromPrimitive::from_isize);
+impl_to_bigint!(i8, FromPrimitive::from_i8);
+impl_to_bigint!(i16, FromPrimitive::from_i16);
+impl_to_bigint!(i32, FromPrimitive::from_i32);
+impl_to_bigint!(i64, FromPrimitive::from_i64);
+impl_to_bigint!(i128, FromPrimitive::from_i128);
+
+impl_to_bigint!(usize, FromPrimitive::from_usize);
+impl_to_bigint!(u8, FromPrimitive::from_u8);
+impl_to_bigint!(u16, FromPrimitive::from_u16);
+impl_to_bigint!(u32, FromPrimitive::from_u32);
+impl_to_bigint!(u64, FromPrimitive::from_u64);
+impl_to_bigint!(u128, FromPrimitive::from_u128);
+
+impl_to_bigint!(f32, FromPrimitive::from_f32);
+impl_to_bigint!(f64, FromPrimitive::from_f64);
+
+#[inline]
+pub(super) fn from_signed_bytes_be(digits: &[u8]) -> BigInt {
+ let sign = match digits.first() {
+ Some(v) if *v > 0x7f => Sign::Minus,
+ Some(_) => Sign::Plus,
+ None => return BigInt::zero(),
+ };
+
+ if sign == Sign::Minus {
+ // two's-complement the content to retrieve the magnitude
+ let mut digits = Vec::from(digits);
+ twos_complement_be(&mut digits);
+ BigInt::from_biguint(sign, BigUint::from_bytes_be(&*digits))
+ } else {
+ BigInt::from_biguint(sign, BigUint::from_bytes_be(digits))
+ }
+}
+
+#[inline]
+pub(super) fn from_signed_bytes_le(digits: &[u8]) -> BigInt {
+ let sign = match digits.last() {
+ Some(v) if *v > 0x7f => Sign::Minus,
+ Some(_) => Sign::Plus,
+ None => return BigInt::zero(),
+ };
+
+ if sign == Sign::Minus {
+ // two's-complement the content to retrieve the magnitude
+ let mut digits = Vec::from(digits);
+ twos_complement_le(&mut digits);
+ BigInt::from_biguint(sign, BigUint::from_bytes_le(&*digits))
+ } else {
+ BigInt::from_biguint(sign, BigUint::from_bytes_le(digits))
+ }
+}
+
+#[inline]
+pub(super) fn to_signed_bytes_be(x: &BigInt) -> Vec<u8> {
+ let mut bytes = x.data.to_bytes_be();
+ let first_byte = bytes.first().cloned().unwrap_or(0);
+ if first_byte > 0x7f
+ && !(first_byte == 0x80 && bytes.iter().skip(1).all(Zero::is_zero) && x.sign == Sign::Minus)
+ {
+ // msb used by magnitude, extend by 1 byte
+ bytes.insert(0, 0);
+ }
+ if x.sign == Sign::Minus {
+ twos_complement_be(&mut bytes);
+ }
+ bytes
+}
+
+#[inline]
+pub(super) fn to_signed_bytes_le(x: &BigInt) -> Vec<u8> {
+ let mut bytes = x.data.to_bytes_le();
+ let last_byte = bytes.last().cloned().unwrap_or(0);
+ if last_byte > 0x7f
+ && !(last_byte == 0x80
+ && bytes.iter().rev().skip(1).all(Zero::is_zero)
+ && x.sign == Sign::Minus)
+ {
+ // msb used by magnitude, extend by 1 byte
+ bytes.push(0);
+ }
+ if x.sign == Sign::Minus {
+ twos_complement_le(&mut bytes);
+ }
+ bytes
+}
+
+/// Perform in-place two's complement of the given binary representation,
+/// in little-endian byte order.
+#[inline]
+fn twos_complement_le(digits: &mut [u8]) {
+ twos_complement(digits)
+}
+
+/// Perform in-place two's complement of the given binary representation
+/// in big-endian byte order.
+#[inline]
+fn twos_complement_be(digits: &mut [u8]) {
+ twos_complement(digits.iter_mut().rev())
+}
+
+/// Perform in-place two's complement of the given digit iterator
+/// starting from the least significant byte.
+#[inline]
+fn twos_complement<'a, I>(digits: I)
+where
+ I: IntoIterator<Item = &'a mut u8>,
+{
+ let mut carry = true;
+ for d in digits {
+ *d = !*d;
+ if carry {
+ *d = d.wrapping_add(1);
+ carry = d.is_zero();
+ }
+ }
+}
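+
+// Illustrative sketch, not part of the upstream crate: how the signed-byte
+// helpers above behave through the public `BigInt::{from,to}_signed_bytes_be`
+// API. -1 is a single 0xff byte, and +128 needs a leading zero byte so the
+// sign bit stays clear.
+#[cfg(test)]
+mod signed_bytes_sketch {
+    use crate::BigInt;
+
+    #[test]
+    fn round_trip() {
+        let minus_one = BigInt::from(-1);
+        assert_eq!(minus_one.to_signed_bytes_be(), vec![0xff]);
+        assert_eq!(BigInt::from_signed_bytes_be(&[0xff]), minus_one);
+        assert_eq!(BigInt::from(128).to_signed_bytes_be(), vec![0x00, 0x80]);
+    }
+}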
diff --git a/src/bigint/division.rs b/src/bigint/division.rs
new file mode 100644
index 0000000..a702b8f
--- /dev/null
+++ b/src/bigint/division.rs
@@ -0,0 +1,448 @@
+use super::CheckedUnsignedAbs::{Negative, Positive};
+use super::Sign::NoSign;
+use super::{BigInt, UnsignedAbs};
+
+use crate::{IsizePromotion, UsizePromotion};
+
+use core::ops::{Div, DivAssign, Rem, RemAssign};
+use num_integer::Integer;
+use num_traits::{CheckedDiv, ToPrimitive, Zero};
+
+forward_all_binop_to_ref_ref!(impl Div for BigInt, div);
+
+impl<'a, 'b> Div<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: &BigInt) -> BigInt {
+ let (q, _) = self.div_rem(other);
+ q
+ }
+}
+
+impl<'a> DivAssign<&'a BigInt> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: &BigInt) {
+ *self = &*self / other;
+ }
+}
+forward_val_assign!(impl DivAssign for BigInt, div_assign);
+
+promote_all_scalars!(impl Div for BigInt, div);
+promote_all_scalars_assign!(impl DivAssign for BigInt, div_assign);
+forward_all_scalar_binop_to_val_val!(impl Div<u32> for BigInt, div);
+forward_all_scalar_binop_to_val_val!(impl Div<u64> for BigInt, div);
+forward_all_scalar_binop_to_val_val!(impl Div<u128> for BigInt, div);
+
+impl Div<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: u32) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data / other)
+ }
+}
+
+impl DivAssign<u32> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: u32) {
+ self.data /= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Div<BigInt> for u32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ BigInt::from_biguint(other.sign, self / other.data)
+ }
+}
+
+impl Div<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: u64) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data / other)
+ }
+}
+
+impl DivAssign<u64> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: u64) {
+ self.data /= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Div<BigInt> for u64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ BigInt::from_biguint(other.sign, self / other.data)
+ }
+}
+
+impl Div<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: u128) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data / other)
+ }
+}
+
+impl DivAssign<u128> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: u128) {
+ self.data /= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Div<BigInt> for u128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ BigInt::from_biguint(other.sign, self / other.data)
+ }
+}
+
+forward_all_scalar_binop_to_val_val!(impl Div<i32> for BigInt, div);
+forward_all_scalar_binop_to_val_val!(impl Div<i64> for BigInt, div);
+forward_all_scalar_binop_to_val_val!(impl Div<i128> for BigInt, div);
+
+impl Div<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: i32) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self / u,
+ Negative(u) => -self / u,
+ }
+ }
+}
+
+impl DivAssign<i32> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: i32) {
+ match other.checked_uabs() {
+ Positive(u) => *self /= u,
+ Negative(u) => {
+ self.sign = -self.sign;
+ *self /= u;
+ }
+ }
+ }
+}
+
+impl Div<BigInt> for i32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ match self.checked_uabs() {
+ Positive(u) => u / other,
+ Negative(u) => u / -other,
+ }
+ }
+}
+
+impl Div<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: i64) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self / u,
+ Negative(u) => -self / u,
+ }
+ }
+}
+
+impl DivAssign<i64> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: i64) {
+ match other.checked_uabs() {
+ Positive(u) => *self /= u,
+ Negative(u) => {
+ self.sign = -self.sign;
+ *self /= u;
+ }
+ }
+ }
+}
+
+impl Div<BigInt> for i64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ match self.checked_uabs() {
+ Positive(u) => u / other,
+ Negative(u) => u / -other,
+ }
+ }
+}
+
+impl Div<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: i128) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self / u,
+ Negative(u) => -self / u,
+ }
+ }
+}
+
+impl DivAssign<i128> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: i128) {
+ match other.checked_uabs() {
+ Positive(u) => *self /= u,
+ Negative(u) => {
+ self.sign = -self.sign;
+ *self /= u;
+ }
+ }
+ }
+}
+
+impl Div<BigInt> for i128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ match self.checked_uabs() {
+ Positive(u) => u / other,
+ Negative(u) => u / -other,
+ }
+ }
+}
+
+forward_all_binop_to_ref_ref!(impl Rem for BigInt, rem);
+
+impl<'a, 'b> Rem<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: &BigInt) -> BigInt {
+ if let Some(other) = other.to_u32() {
+ self % other
+ } else if let Some(other) = other.to_i32() {
+ self % other
+ } else {
+ let (_, r) = self.div_rem(other);
+ r
+ }
+ }
+}
+
+impl<'a> RemAssign<&'a BigInt> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: &BigInt) {
+ *self = &*self % other;
+ }
+}
+forward_val_assign!(impl RemAssign for BigInt, rem_assign);
+
+promote_all_scalars!(impl Rem for BigInt, rem);
+promote_all_scalars_assign!(impl RemAssign for BigInt, rem_assign);
+forward_all_scalar_binop_to_val_val!(impl Rem<u32> for BigInt, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<u64> for BigInt, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<u128> for BigInt, rem);
+
+impl Rem<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: u32) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data % other)
+ }
+}
+
+impl RemAssign<u32> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: u32) {
+ self.data %= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Rem<BigInt> for u32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ BigInt::from(self % other.data)
+ }
+}
+
+impl Rem<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: u64) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data % other)
+ }
+}
+
+impl RemAssign<u64> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: u64) {
+ self.data %= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Rem<BigInt> for u64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ BigInt::from(self % other.data)
+ }
+}
+
+impl Rem<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: u128) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data % other)
+ }
+}
+
+impl RemAssign<u128> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: u128) {
+ self.data %= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Rem<BigInt> for u128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ BigInt::from(self % other.data)
+ }
+}
+
+forward_all_scalar_binop_to_val_val!(impl Rem<i32> for BigInt, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<i64> for BigInt, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<i128> for BigInt, rem);
+
+impl Rem<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: i32) -> BigInt {
+ self % other.uabs()
+ }
+}
+
+impl RemAssign<i32> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: i32) {
+ *self %= other.uabs();
+ }
+}
+
+impl Rem<BigInt> for i32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ match self.checked_uabs() {
+ Positive(u) => u % other,
+ Negative(u) => -(u % other),
+ }
+ }
+}
+
+impl Rem<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: i64) -> BigInt {
+ self % other.uabs()
+ }
+}
+
+impl RemAssign<i64> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: i64) {
+ *self %= other.uabs();
+ }
+}
+
+impl Rem<BigInt> for i64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ match self.checked_uabs() {
+ Positive(u) => u % other,
+ Negative(u) => -(u % other),
+ }
+ }
+}
+
+impl Rem<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: i128) -> BigInt {
+ self % other.uabs()
+ }
+}
+
+impl RemAssign<i128> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: i128) {
+ *self %= other.uabs();
+ }
+}
+
+impl Rem<BigInt> for i128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ match self.checked_uabs() {
+ Positive(u) => u % other,
+ Negative(u) => -(u % other),
+ }
+ }
+}
+
+impl CheckedDiv for BigInt {
+ #[inline]
+ fn checked_div(&self, v: &BigInt) -> Option<BigInt> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self.div(v))
+ }
+}
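+
+// Illustrative sketch, not part of the upstream crate: `/` and `%` on BigInt
+// truncate toward zero (the remainder keeps the sign of the dividend), and
+// `checked_div` turns the division-by-zero panic into `None`.
+#[cfg(test)]
+mod division_sketch {
+    use crate::BigInt;
+    use num_traits::CheckedDiv;
+
+    #[test]
+    fn truncating_and_checked() {
+        assert_eq!(BigInt::from(-7) / BigInt::from(2), BigInt::from(-3));
+        assert_eq!(BigInt::from(-7) % BigInt::from(2), BigInt::from(-1));
+        assert!(BigInt::from(1).checked_div(&BigInt::from(0)).is_none());
+    }
+}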
diff --git a/src/bigint/multiplication.rs b/src/bigint/multiplication.rs
new file mode 100644
index 0000000..aaf5b14
--- /dev/null
+++ b/src/bigint/multiplication.rs
@@ -0,0 +1,192 @@
+use super::CheckedUnsignedAbs::{Negative, Positive};
+use super::Sign::{self, Minus, NoSign, Plus};
+use super::{BigInt, UnsignedAbs};
+
+use crate::{IsizePromotion, UsizePromotion};
+
+use core::iter::Product;
+use core::ops::{Mul, MulAssign};
+use num_traits::{CheckedMul, One, Zero};
+
+impl Mul<Sign> for Sign {
+ type Output = Sign;
+
+ #[inline]
+ fn mul(self, other: Sign) -> Sign {
+ match (self, other) {
+ (NoSign, _) | (_, NoSign) => NoSign,
+ (Plus, Plus) | (Minus, Minus) => Plus,
+ (Plus, Minus) | (Minus, Plus) => Minus,
+ }
+ }
+}
+
+forward_all_binop_to_ref_ref!(impl Mul for BigInt, mul);
+
+impl<'a, 'b> Mul<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: &BigInt) -> BigInt {
+ BigInt::from_biguint(self.sign * other.sign, &self.data * &other.data)
+ }
+}
+
+impl<'a> MulAssign<&'a BigInt> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: &BigInt) {
+ *self = &*self * other;
+ }
+}
+forward_val_assign!(impl MulAssign for BigInt, mul_assign);
+
+promote_all_scalars!(impl Mul for BigInt, mul);
+promote_all_scalars_assign!(impl MulAssign for BigInt, mul_assign);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u32> for BigInt, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u64> for BigInt, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u128> for BigInt, mul);
+
+impl Mul<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: u32) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data * other)
+ }
+}
+
+impl MulAssign<u32> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: u32) {
+ self.data *= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Mul<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: u64) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data * other)
+ }
+}
+
+impl MulAssign<u64> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: u64) {
+ self.data *= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Mul<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: u128) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data * other)
+ }
+}
+
+impl MulAssign<u128> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: u128) {
+ self.data *= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<i32> for BigInt, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<i64> for BigInt, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<i128> for BigInt, mul);
+
+impl Mul<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: i32) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self * u,
+ Negative(u) => -self * u,
+ }
+ }
+}
+
+impl MulAssign<i32> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: i32) {
+ match other.checked_uabs() {
+ Positive(u) => *self *= u,
+ Negative(u) => {
+ self.sign = -self.sign;
+ self.data *= u;
+ }
+ }
+ }
+}
+
+impl Mul<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: i64) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self * u,
+ Negative(u) => -self * u,
+ }
+ }
+}
+
+impl MulAssign<i64> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: i64) {
+ match other.checked_uabs() {
+ Positive(u) => *self *= u,
+ Negative(u) => {
+ self.sign = -self.sign;
+ self.data *= u;
+ }
+ }
+ }
+}
+
+impl Mul<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: i128) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self * u,
+ Negative(u) => -self * u,
+ }
+ }
+}
+
+impl MulAssign<i128> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: i128) {
+ match other.checked_uabs() {
+ Positive(u) => *self *= u,
+ Negative(u) => {
+ self.sign = -self.sign;
+ self.data *= u;
+ }
+ }
+ }
+}
+
+impl CheckedMul for BigInt {
+ #[inline]
+ fn checked_mul(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self.mul(v))
+ }
+}
+
+impl_product_iter_type!(BigInt);
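+
+// Illustrative sketch, not part of the upstream crate: the `Mul<Sign>` rule
+// above gives the usual sign behaviour, and `impl_product_iter_type!` lets an
+// iterator of BigInts be folded with `*`.
+#[cfg(test)]
+mod multiplication_sketch {
+    use crate::BigInt;
+
+    #[test]
+    fn signs_and_product() {
+        assert_eq!(BigInt::from(-3) * BigInt::from(4), BigInt::from(-12));
+        assert_eq!(BigInt::from(-3) * BigInt::from(-4), BigInt::from(12));
+        let factorial: BigInt = (1..=5).map(BigInt::from).product();
+        assert_eq!(factorial, BigInt::from(120));
+    }
+}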
diff --git a/src/bigint/power.rs b/src/bigint/power.rs
new file mode 100644
index 0000000..a4dd806
--- /dev/null
+++ b/src/bigint/power.rs
@@ -0,0 +1,94 @@
+use super::BigInt;
+use super::Sign::{self, Minus, Plus};
+
+use crate::BigUint;
+
+use num_integer::Integer;
+use num_traits::{Pow, Signed, Zero};
+
+/// Helper function for `pow`.
+///
+/// Computes the effect of the exponent on the sign.
+#[inline]
+fn powsign<T: Integer>(sign: Sign, other: &T) -> Sign {
+ if other.is_zero() {
+ Plus
+ } else if sign != Minus || other.is_odd() {
+ sign
+ } else {
+ -sign
+ }
+}
+
+macro_rules! pow_impl {
+ ($T:ty) => {
+ impl Pow<$T> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn pow(self, rhs: $T) -> BigInt {
+ BigInt::from_biguint(powsign(self.sign, &rhs), self.data.pow(rhs))
+ }
+ }
+
+ impl<'b> Pow<&'b $T> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn pow(self, rhs: &$T) -> BigInt {
+ BigInt::from_biguint(powsign(self.sign, rhs), self.data.pow(rhs))
+ }
+ }
+
+ impl<'a> Pow<$T> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn pow(self, rhs: $T) -> BigInt {
+ BigInt::from_biguint(powsign(self.sign, &rhs), Pow::pow(&self.data, rhs))
+ }
+ }
+
+ impl<'a, 'b> Pow<&'b $T> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn pow(self, rhs: &$T) -> BigInt {
+ BigInt::from_biguint(powsign(self.sign, rhs), Pow::pow(&self.data, rhs))
+ }
+ }
+ };
+}
+
+pow_impl!(u8);
+pow_impl!(u16);
+pow_impl!(u32);
+pow_impl!(u64);
+pow_impl!(usize);
+pow_impl!(u128);
+pow_impl!(BigUint);
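+
+// Illustrative sketch, not part of the upstream crate: `powsign` means a
+// negative base keeps its sign only for odd exponents, and any base raised
+// to the 0th power is +1.
+#[cfg(test)]
+mod pow_sign_sketch {
+    use crate::BigInt;
+    use num_traits::Pow;
+
+    #[test]
+    fn sign_follows_exponent_parity() {
+        assert_eq!(Pow::pow(BigInt::from(-2), 3u32), BigInt::from(-8));
+        assert_eq!(Pow::pow(BigInt::from(-2), 2u32), BigInt::from(4));
+        assert_eq!(Pow::pow(BigInt::from(-2), 0u32), BigInt::from(1));
+    }
+}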
+
+pub(super) fn modpow(x: &BigInt, exponent: &BigInt, modulus: &BigInt) -> BigInt {
+ assert!(
+ !exponent.is_negative(),
+ "negative exponentiation is not supported!"
+ );
+ assert!(
+ !modulus.is_zero(),
+ "attempt to calculate with zero modulus!"
+ );
+
+ let result = x.data.modpow(&exponent.data, &modulus.data);
+ if result.is_zero() {
+ return BigInt::zero();
+ }
+
+ // The sign of the result follows the modulus, like `mod_floor`.
+ let (sign, mag) = match (x.is_negative() && exponent.is_odd(), modulus.is_negative()) {
+ (false, false) => (Plus, result),
+ (true, false) => (Plus, &modulus.data - result),
+ (false, true) => (Minus, &modulus.data - result),
+ (true, true) => (Minus, result),
+ };
+ BigInt::from_biguint(sign, mag)
+}
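+
+// Illustrative sketch, not part of the upstream crate: as the comment above
+// says, the result's sign follows the modulus, matching `mod_floor`.
+#[cfg(test)]
+mod modpow_sign_sketch {
+    use crate::BigInt;
+
+    #[test]
+    fn result_sign_follows_modulus() {
+        let exp = BigInt::from(3);
+        // (-4)^3 = -64, and -64 mod_floor 5 = 1.
+        assert_eq!(BigInt::from(-4).modpow(&exp, &BigInt::from(5)), BigInt::from(1));
+        // 4^3 = 64, and 64 mod_floor -5 = -1.
+        assert_eq!(BigInt::from(4).modpow(&exp, &BigInt::from(-5)), BigInt::from(-1));
+    }
+}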
diff --git a/src/bigint/serde.rs b/src/bigint/serde.rs
new file mode 100644
index 0000000..5c232f9
--- /dev/null
+++ b/src/bigint/serde.rs
@@ -0,0 +1,58 @@
+use super::{BigInt, Sign};
+
+use serde::de::{Error, Unexpected};
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+impl Serialize for Sign {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ // Note: do not change the serialization format, or it may break
+ // forward and backward compatibility of serialized data!
+ match *self {
+ Sign::Minus => (-1i8).serialize(serializer),
+ Sign::NoSign => 0i8.serialize(serializer),
+ Sign::Plus => 1i8.serialize(serializer),
+ }
+ }
+}
+
+impl<'de> Deserialize<'de> for Sign {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let sign = i8::deserialize(deserializer)?;
+ match sign {
+ -1 => Ok(Sign::Minus),
+ 0 => Ok(Sign::NoSign),
+ 1 => Ok(Sign::Plus),
+ _ => Err(D::Error::invalid_value(
+ Unexpected::Signed(sign.into()),
+ &"a sign of -1, 0, or 1",
+ )),
+ }
+ }
+}
+
+impl Serialize for BigInt {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ // Note: do not change the serialization format, or it may break
+ // forward and backward compatibility of serialized data!
+ (self.sign, &self.data).serialize(serializer)
+ }
+}
+
+impl<'de> Deserialize<'de> for BigInt {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let (sign, data) = Deserialize::deserialize(deserializer)?;
+ Ok(BigInt::from_biguint(sign, data))
+ }
+}
diff --git a/src/bigint/shift.rs b/src/bigint/shift.rs
new file mode 100644
index 0000000..b816e12
--- /dev/null
+++ b/src/bigint/shift.rs
@@ -0,0 +1,107 @@
+use super::BigInt;
+use super::Sign::NoSign;
+
+use core::ops::{Shl, ShlAssign, Shr, ShrAssign};
+use num_traits::{PrimInt, Signed, Zero};
+
+macro_rules! impl_shift {
+ (@ref $Shx:ident :: $shx:ident, $ShxAssign:ident :: $shx_assign:ident, $rhs:ty) => {
+ impl<'b> $Shx<&'b $rhs> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn $shx(self, rhs: &'b $rhs) -> BigInt {
+ $Shx::$shx(self, *rhs)
+ }
+ }
+ impl<'a, 'b> $Shx<&'b $rhs> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn $shx(self, rhs: &'b $rhs) -> BigInt {
+ $Shx::$shx(self, *rhs)
+ }
+ }
+ impl<'b> $ShxAssign<&'b $rhs> for BigInt {
+ #[inline]
+ fn $shx_assign(&mut self, rhs: &'b $rhs) {
+ $ShxAssign::$shx_assign(self, *rhs);
+ }
+ }
+ };
+ ($($rhs:ty),+) => {$(
+ impl Shl<$rhs> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn shl(self, rhs: $rhs) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data << rhs)
+ }
+ }
+ impl<'a> Shl<$rhs> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn shl(self, rhs: $rhs) -> BigInt {
+ BigInt::from_biguint(self.sign, &self.data << rhs)
+ }
+ }
+ impl ShlAssign<$rhs> for BigInt {
+ #[inline]
+ fn shl_assign(&mut self, rhs: $rhs) {
+ self.data <<= rhs
+ }
+ }
+ impl_shift! { @ref Shl::shl, ShlAssign::shl_assign, $rhs }
+
+ impl Shr<$rhs> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn shr(self, rhs: $rhs) -> BigInt {
+ let round_down = shr_round_down(&self, rhs);
+ let data = self.data >> rhs;
+ let data = if round_down { data + 1u8 } else { data };
+ BigInt::from_biguint(self.sign, data)
+ }
+ }
+ impl<'a> Shr<$rhs> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn shr(self, rhs: $rhs) -> BigInt {
+ let round_down = shr_round_down(self, rhs);
+ let data = &self.data >> rhs;
+ let data = if round_down { data + 1u8 } else { data };
+ BigInt::from_biguint(self.sign, data)
+ }
+ }
+ impl ShrAssign<$rhs> for BigInt {
+ #[inline]
+ fn shr_assign(&mut self, rhs: $rhs) {
+ let round_down = shr_round_down(self, rhs);
+ self.data >>= rhs;
+ if round_down {
+ self.data += 1u8;
+ } else if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+ }
+ impl_shift! { @ref Shr::shr, ShrAssign::shr_assign, $rhs }
+ )*};
+}
+
+impl_shift! { u8, u16, u32, u64, u128, usize }
+impl_shift! { i8, i16, i32, i64, i128, isize }
+
+// Negative values need a rounding adjustment if there are any ones in the
+// bits that are getting shifted out.
+fn shr_round_down<T: PrimInt>(i: &BigInt, shift: T) -> bool {
+ if i.is_negative() {
+ let zeros = i.trailing_zeros().expect("negative values are non-zero");
+ shift > T::zero() && shift.to_u64().map(|shift| zeros < shift).unwrap_or(true)
+ } else {
+ false
+ }
+}
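+
+// Illustrative sketch, not part of the upstream crate: the rounding
+// adjustment above makes `>>` on negative values round toward negative
+// infinity, i.e. behave like an arithmetic shift of the two's-complement form.
+#[cfg(test)]
+mod shr_rounding_sketch {
+    use crate::BigInt;
+
+    #[test]
+    fn negative_shift_rounds_down() {
+        assert_eq!(BigInt::from(-3) >> 1, BigInt::from(-2)); // floor(-1.5)
+        assert_eq!(BigInt::from(-4) >> 1, BigInt::from(-2)); // exact
+        assert_eq!(BigInt::from(3) >> 1, BigInt::from(1)); // positives truncate
+    }
+}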
diff --git a/src/bigint/subtraction.rs b/src/bigint/subtraction.rs
new file mode 100644
index 0000000..a12a844
--- /dev/null
+++ b/src/bigint/subtraction.rs
@@ -0,0 +1,300 @@
+use super::CheckedUnsignedAbs::{Negative, Positive};
+use super::Sign::{Minus, NoSign, Plus};
+use super::{BigInt, UnsignedAbs};
+
+use crate::{IsizePromotion, UsizePromotion};
+
+use core::cmp::Ordering::{Equal, Greater, Less};
+use core::mem;
+use core::ops::{Sub, SubAssign};
+use num_traits::{CheckedSub, Zero};
+
+// We want to forward to BigUint::sub, but it's not clear how that will go until
+// we compare both sign and magnitude. So we duplicate this body for every
+// val/ref combination, deferring that decision to BigUint's own forwarding.
+macro_rules! bigint_sub {
+ ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => {
+ match ($a.sign, $b.sign) {
+ (_, NoSign) => $a_owned,
+ (NoSign, _) => -$b_owned,
+ // opposite signs => keep the sign of the left with the sum of magnitudes
+ (Plus, Minus) | (Minus, Plus) => BigInt::from_biguint($a.sign, $a_data + $b_data),
+ // same sign => keep or toggle the sign of the left with the difference of magnitudes
+ (Plus, Plus) | (Minus, Minus) => match $a.data.cmp(&$b.data) {
+ Less => BigInt::from_biguint(-$a.sign, $b_data - $a_data),
+ Greater => BigInt::from_biguint($a.sign, $a_data - $b_data),
+ Equal => Zero::zero(),
+ },
+ }
+ };
+}
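+
+// Illustrative sketch, not part of the upstream crate, of the case analysis
+// in `bigint_sub!` above: opposite signs add magnitudes, equal signs take the
+// magnitude difference and may flip the sign, and equal operands give zero.
+#[cfg(test)]
+mod sub_cases_sketch {
+    use crate::BigInt;
+
+    #[test]
+    fn sign_and_magnitude_cases() {
+        assert_eq!(BigInt::from(3) - BigInt::from(-4), BigInt::from(7));
+        assert_eq!(BigInt::from(3) - BigInt::from(5), BigInt::from(-2));
+        assert_eq!(BigInt::from(-5) - BigInt::from(-5), BigInt::from(0));
+    }
+}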
+
+impl<'a, 'b> Sub<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: &BigInt) -> BigInt {
+ bigint_sub!(
+ self,
+ self.clone(),
+ &self.data,
+ other,
+ other.clone(),
+ &other.data
+ )
+ }
+}
+
+impl<'a> Sub<BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ bigint_sub!(self, self.clone(), &self.data, other, other, other.data)
+ }
+}
+
+impl<'a> Sub<&'a BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: &BigInt) -> BigInt {
+ bigint_sub!(self, self, self.data, other, other.clone(), &other.data)
+ }
+}
+
+impl Sub<BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ bigint_sub!(self, self, self.data, other, other, other.data)
+ }
+}
+
+impl<'a> SubAssign<&'a BigInt> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: &BigInt) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n - other;
+ }
+}
+forward_val_assign!(impl SubAssign for BigInt, sub_assign);
+
+promote_all_scalars!(impl Sub for BigInt, sub);
+promote_all_scalars_assign!(impl SubAssign for BigInt, sub_assign);
+forward_all_scalar_binop_to_val_val!(impl Sub<u32> for BigInt, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<u64> for BigInt, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<u128> for BigInt, sub);
+
+impl Sub<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: u32) -> BigInt {
+ match self.sign {
+ NoSign => -BigInt::from(other),
+ Minus => -BigInt::from(self.data + other),
+ Plus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Greater => BigInt::from(self.data - other),
+ Less => -BigInt::from(other - self.data),
+ },
+ }
+ }
+}
+impl SubAssign<u32> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: u32) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n - other;
+ }
+}
+
+impl Sub<BigInt> for u32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ -(other - self)
+ }
+}
+
+impl Sub<BigInt> for u64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ -(other - self)
+ }
+}
+
+impl Sub<BigInt> for u128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ -(other - self)
+ }
+}
+
+impl Sub<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: u64) -> BigInt {
+ match self.sign {
+ NoSign => -BigInt::from(other),
+ Minus => -BigInt::from(self.data + other),
+ Plus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Greater => BigInt::from(self.data - other),
+ Less => -BigInt::from(other - self.data),
+ },
+ }
+ }
+}
+
+impl SubAssign<u64> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: u64) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n - other;
+ }
+}
+
+impl Sub<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: u128) -> BigInt {
+ match self.sign {
+ NoSign => -BigInt::from(other),
+ Minus => -BigInt::from(self.data + other),
+ Plus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Greater => BigInt::from(self.data - other),
+ Less => -BigInt::from(other - self.data),
+ },
+ }
+ }
+}
+
+impl SubAssign<u128> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: u128) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n - other;
+ }
+}
+
+forward_all_scalar_binop_to_val_val!(impl Sub<i32> for BigInt, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<i64> for BigInt, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<i128> for BigInt, sub);
+
+impl Sub<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: i32) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self - u,
+ Negative(u) => self + u,
+ }
+ }
+}
+impl SubAssign<i32> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: i32) {
+ match other.checked_uabs() {
+ Positive(u) => *self -= u,
+ Negative(u) => *self += u,
+ }
+ }
+}
+
+impl Sub<BigInt> for i32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ match self.checked_uabs() {
+ Positive(u) => u - other,
+ Negative(u) => -other - u,
+ }
+ }
+}
+
+impl Sub<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: i64) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self - u,
+ Negative(u) => self + u,
+ }
+ }
+}
+impl SubAssign<i64> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: i64) {
+ match other.checked_uabs() {
+ Positive(u) => *self -= u,
+ Negative(u) => *self += u,
+ }
+ }
+}
+
+impl Sub<BigInt> for i64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ match self.checked_uabs() {
+ Positive(u) => u - other,
+ Negative(u) => -other - u,
+ }
+ }
+}
+
+impl Sub<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: i128) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self - u,
+ Negative(u) => self + u,
+ }
+ }
+}
+
+impl SubAssign<i128> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: i128) {
+ match other.checked_uabs() {
+ Positive(u) => *self -= u,
+ Negative(u) => *self += u,
+ }
+ }
+}
+
+impl Sub<BigInt> for i128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ match self.checked_uabs() {
+ Positive(u) => u - other,
+ Negative(u) => -other - u,
+ }
+ }
+}
+
+impl CheckedSub for BigInt {
+ #[inline]
+ fn checked_sub(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self.sub(v))
+ }
+}
diff --git a/src/bigrand.rs b/src/bigrand.rs
new file mode 100644
index 0000000..cb44032
--- /dev/null
+++ b/src/bigrand.rs
@@ -0,0 +1,283 @@
+//! Randomization of big integers
+
+use rand::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler};
+use rand::prelude::*;
+
+use crate::BigInt;
+use crate::BigUint;
+use crate::Sign::*;
+
+use crate::biguint::biguint_from_vec;
+
+use num_integer::Integer;
+use num_traits::{ToPrimitive, Zero};
+
+/// A trait for sampling random big integers.
+///
+/// The `rand` feature must be enabled to use this. See crate-level documentation for details.
+pub trait RandBigInt {
+ /// Generate a random `BigUint` of the given bit size.
+ fn gen_biguint(&mut self, bit_size: u64) -> BigUint;
+
+ /// Generate a random BigInt of the given bit size.
+ fn gen_bigint(&mut self, bit_size: u64) -> BigInt;
+
+ /// Generate a random `BigUint` less than the given bound. Fails
+ /// when the bound is zero.
+ fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint;
+
+ /// Generate a random `BigUint` within the given range. The lower
+ /// bound is inclusive; the upper bound is exclusive. Fails when
+ /// the upper bound is not greater than the lower bound.
+ fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint;
+
+ /// Generate a random `BigInt` within the given range. The lower
+ /// bound is inclusive; the upper bound is exclusive. Fails when
+ /// the upper bound is not greater than the lower bound.
+ fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt;
+}
+
+fn gen_bits<R: Rng + ?Sized>(rng: &mut R, data: &mut [u32], rem: u64) {
+ // `fill` is faster than many `gen::<u32>` calls
+ rng.fill(data);
+ if rem > 0 {
+ let last = data.len() - 1;
+ data[last] >>= 32 - rem;
+ }
+}
+
+impl<R: Rng + ?Sized> RandBigInt for R {
+ #[cfg(not(u64_digit))]
+ fn gen_biguint(&mut self, bit_size: u64) -> BigUint {
+ let (digits, rem) = bit_size.div_rem(&32);
+ let len = (digits + (rem > 0) as u64)
+ .to_usize()
+ .expect("capacity overflow");
+ let mut data = vec![0u32; len];
+ gen_bits(self, &mut data, rem);
+ biguint_from_vec(data)
+ }
+
+ #[cfg(u64_digit)]
+ fn gen_biguint(&mut self, bit_size: u64) -> BigUint {
+ use core::slice;
+
+ let (digits, rem) = bit_size.div_rem(&32);
+ let len = (digits + (rem > 0) as u64)
+ .to_usize()
+ .expect("capacity overflow");
+ let native_digits = bit_size.div_ceil(&64);
+ let native_len = native_digits.to_usize().expect("capacity overflow");
+ let mut data = vec![0u64; native_len];
+ unsafe {
+ // Generate bits in a `&mut [u32]` slice for value stability
+ let ptr = data.as_mut_ptr() as *mut u32;
+ debug_assert!(native_len * 2 >= len);
+ let data = slice::from_raw_parts_mut(ptr, len);
+ gen_bits(self, data, rem);
+ }
+ #[cfg(target_endian = "big")]
+ for digit in &mut data {
+ // swap u32 digits into u64 endianness
+ *digit = (*digit << 32) | (*digit >> 32);
+ }
+ biguint_from_vec(data)
+ }
+
+ fn gen_bigint(&mut self, bit_size: u64) -> BigInt {
+ loop {
+ // Generate a random BigUint...
+ let biguint = self.gen_biguint(bit_size);
+ // ...and then randomly assign it a Sign...
+ let sign = if biguint.is_zero() {
+ // ...except that if the BigUint is zero, we need to try
+ // again with probability 0.5. This is because otherwise,
+ // the probability of generating a zero BigInt would be
+ // double that of any other number.
+ if self.gen() {
+ continue;
+ } else {
+ NoSign
+ }
+ } else if self.gen() {
+ Plus
+ } else {
+ Minus
+ };
+ return BigInt::from_biguint(sign, biguint);
+ }
+ }
+
+ fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint {
+ assert!(!bound.is_zero());
+ let bits = bound.bits();
+ loop {
+ let n = self.gen_biguint(bits);
+ if n < *bound {
+ return n;
+ }
+ }
+ }
+
+ fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint {
+ assert!(*lbound < *ubound);
+ if lbound.is_zero() {
+ self.gen_biguint_below(ubound)
+ } else {
+ lbound + self.gen_biguint_below(&(ubound - lbound))
+ }
+ }
+
+ fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt {
+ assert!(*lbound < *ubound);
+ if lbound.is_zero() {
+ BigInt::from(self.gen_biguint_below(ubound.magnitude()))
+ } else if ubound.is_zero() {
+ lbound + BigInt::from(self.gen_biguint_below(lbound.magnitude()))
+ } else {
+ let delta = ubound - lbound;
+ lbound + BigInt::from(self.gen_biguint_below(delta.magnitude()))
+ }
+ }
+}
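+
+// Illustrative usage sketch, not part of the upstream crate (and only
+// compiled when the `rand` feature is enabled): a sample of `bit_size` bits
+// never exceeds that many bits. `StepRng` is rand's deterministic mock
+// generator, used here so the sketch does not assume a thread-local RNG.
+#[cfg(test)]
+mod rand_sketch {
+    use super::RandBigInt;
+    use rand::rngs::mock::StepRng;
+
+    #[test]
+    fn sample_fits_requested_bits() {
+        let mut rng = StepRng::new(0x1234_5678_9abc_def0, 0x1111_1111_1111_1111);
+        assert!(rng.gen_biguint(1000).bits() <= 1000);
+        assert!(rng.gen_bigint(64).bits() <= 64);
+    }
+}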
+
+/// The back-end implementing rand's `UniformSampler` for `BigUint`.
+#[derive(Clone, Debug)]
+pub struct UniformBigUint {
+ base: BigUint,
+ len: BigUint,
+}
+
+impl UniformSampler for UniformBigUint {
+ type X = BigUint;
+
+ #[inline]
+ fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where
+ B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized,
+ {
+ let low = low_b.borrow();
+ let high = high_b.borrow();
+ assert!(low < high);
+ UniformBigUint {
+ len: high - low,
+ base: low.clone(),
+ }
+ }
+
+ #[inline]
+ fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where
+ B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized,
+ {
+ let low = low_b.borrow();
+ let high = high_b.borrow();
+ assert!(low <= high);
+ Self::new(low, high + 1u32)
+ }
+
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+ &self.base + rng.gen_biguint_below(&self.len)
+ }
+
+ #[inline]
+ fn sample_single<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R) -> Self::X
+ where
+ B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized,
+ {
+ rng.gen_biguint_range(low.borrow(), high.borrow())
+ }
+}
+
+impl SampleUniform for BigUint {
+ type Sampler = UniformBigUint;
+}
+
+/// The back-end implementing rand's `UniformSampler` for `BigInt`.
+#[derive(Clone, Debug)]
+pub struct UniformBigInt {
+ base: BigInt,
+ len: BigUint,
+}
+
+impl UniformSampler for UniformBigInt {
+ type X = BigInt;
+
+ #[inline]
+ fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where
+ B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized,
+ {
+ let low = low_b.borrow();
+ let high = high_b.borrow();
+ assert!(low < high);
+ UniformBigInt {
+ len: (high - low).into_parts().1,
+ base: low.clone(),
+ }
+ }
+
+ #[inline]
+ fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where
+ B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized,
+ {
+ let low = low_b.borrow();
+ let high = high_b.borrow();
+ assert!(low <= high);
+ Self::new(low, high + 1u32)
+ }
+
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+ &self.base + BigInt::from(rng.gen_biguint_below(&self.len))
+ }
+
+ #[inline]
+ fn sample_single<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R) -> Self::X
+ where
+ B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized,
+ {
+ rng.gen_bigint_range(low.borrow(), high.borrow())
+ }
+}
+
+impl SampleUniform for BigInt {
+ type Sampler = UniformBigInt;
+}
+
+/// A random distribution for `BigUint` and `BigInt` values of a particular bit size.
+///
+/// The `rand` feature must be enabled to use this. See crate-level documentation for details.
+#[derive(Clone, Copy, Debug)]
+pub struct RandomBits {
+ bits: u64,
+}
+
+impl RandomBits {
+ #[inline]
+ pub fn new(bits: u64) -> RandomBits {
+ RandomBits { bits }
+ }
+}
+
+impl Distribution<BigUint> for RandomBits {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigUint {
+ rng.gen_biguint(self.bits)
+ }
+}
+
+impl Distribution<BigInt> for RandomBits {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigInt {
+ rng.gen_bigint(self.bits)
+ }
+}
diff --git a/src/biguint.rs b/src/biguint.rs
new file mode 100644
index 0000000..4235071
--- /dev/null
+++ b/src/biguint.rs
@@ -0,0 +1,1102 @@
+use crate::big_digit::{self, BigDigit};
+use crate::std_alloc::{String, Vec};
+
+use core::cmp;
+use core::cmp::Ordering;
+use core::default::Default;
+use core::fmt;
+use core::hash;
+use core::mem;
+use core::str;
+use core::{u32, u64, u8};
+
+use num_integer::{Integer, Roots};
+use num_traits::{Num, One, Pow, ToPrimitive, Unsigned, Zero};
+
+mod addition;
+mod division;
+mod multiplication;
+mod subtraction;
+
+mod bits;
+mod convert;
+mod iter;
+mod monty;
+mod power;
+mod shift;
+
+#[cfg(any(feature = "quickcheck", feature = "arbitrary"))]
+mod arbitrary;
+
+#[cfg(feature = "serde")]
+mod serde;
+
+pub(crate) use self::convert::to_str_radix_reversed;
+pub use self::iter::{U32Digits, U64Digits};
+
+/// A big unsigned integer type.
+pub struct BigUint {
+ data: Vec<BigDigit>,
+}
+
+// Note: derived `Clone` doesn't specialize `clone_from`,
+// but we want to keep the allocation in `data`.
+impl Clone for BigUint {
+ #[inline]
+ fn clone(&self) -> Self {
+ BigUint {
+ data: self.data.clone(),
+ }
+ }
+
+ #[inline]
+ fn clone_from(&mut self, other: &Self) {
+ self.data.clone_from(&other.data);
+ }
+}
+
+impl hash::Hash for BigUint {
+ #[inline]
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ debug_assert!(self.data.last() != Some(&0));
+ self.data.hash(state);
+ }
+}
+
+impl PartialEq for BigUint {
+ #[inline]
+ fn eq(&self, other: &BigUint) -> bool {
+ debug_assert!(self.data.last() != Some(&0));
+ debug_assert!(other.data.last() != Some(&0));
+ self.data == other.data
+ }
+}
+impl Eq for BigUint {}
+
+impl PartialOrd for BigUint {
+ #[inline]
+ fn partial_cmp(&self, other: &BigUint) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Ord for BigUint {
+ #[inline]
+ fn cmp(&self, other: &BigUint) -> Ordering {
+ cmp_slice(&self.data[..], &other.data[..])
+ }
+}
+
+#[inline]
+fn cmp_slice(a: &[BigDigit], b: &[BigDigit]) -> Ordering {
+ debug_assert!(a.last() != Some(&0));
+ debug_assert!(b.last() != Some(&0));
+
+ match Ord::cmp(&a.len(), &b.len()) {
+ Ordering::Equal => Iterator::cmp(a.iter().rev(), b.iter().rev()),
+ other => other,
+ }
+}
+
+impl Default for BigUint {
+ #[inline]
+ fn default() -> BigUint {
+ Zero::zero()
+ }
+}
+
+impl fmt::Debug for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, f)
+ }
+}
+
+impl fmt::Display for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad_integral(true, "", &self.to_str_radix(10))
+ }
+}
+
+impl fmt::LowerHex for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad_integral(true, "0x", &self.to_str_radix(16))
+ }
+}
+
+impl fmt::UpperHex for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut s = self.to_str_radix(16);
+ s.make_ascii_uppercase();
+ f.pad_integral(true, "0x", &s)
+ }
+}
+
+impl fmt::Binary for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad_integral(true, "0b", &self.to_str_radix(2))
+ }
+}
+
+impl fmt::Octal for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad_integral(true, "0o", &self.to_str_radix(8))
+ }
+}
+
+impl Zero for BigUint {
+ #[inline]
+ fn zero() -> BigUint {
+ BigUint { data: Vec::new() }
+ }
+
+ #[inline]
+ fn set_zero(&mut self) {
+ self.data.clear();
+ }
+
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.data.is_empty()
+ }
+}
+
+impl One for BigUint {
+ #[inline]
+ fn one() -> BigUint {
+ BigUint { data: vec![1] }
+ }
+
+ #[inline]
+ fn set_one(&mut self) {
+ self.data.clear();
+ self.data.push(1);
+ }
+
+ #[inline]
+ fn is_one(&self) -> bool {
+ self.data[..] == [1]
+ }
+}
+
+impl Unsigned for BigUint {}
+
+impl Integer for BigUint {
+ #[inline]
+ fn div_rem(&self, other: &BigUint) -> (BigUint, BigUint) {
+ division::div_rem_ref(self, other)
+ }
+
+ #[inline]
+ fn div_floor(&self, other: &BigUint) -> BigUint {
+ let (d, _) = division::div_rem_ref(self, other);
+ d
+ }
+
+ #[inline]
+ fn mod_floor(&self, other: &BigUint) -> BigUint {
+ let (_, m) = division::div_rem_ref(self, other);
+ m
+ }
+
+ #[inline]
+ fn div_mod_floor(&self, other: &BigUint) -> (BigUint, BigUint) {
+ division::div_rem_ref(self, other)
+ }
+
+ #[inline]
+ fn div_ceil(&self, other: &BigUint) -> BigUint {
+ let (d, m) = division::div_rem_ref(self, other);
+ if m.is_zero() {
+ d
+ } else {
+ d + 1u32
+ }
+ }
+
+ /// Calculates the Greatest Common Divisor (GCD) of the number and `other`.
+ ///
+ /// The result is always positive.
+ #[inline]
+ fn gcd(&self, other: &Self) -> Self {
+ #[inline]
+ fn twos(x: &BigUint) -> u64 {
+ x.trailing_zeros().unwrap_or(0)
+ }
+
+ // Stein's algorithm
+ if self.is_zero() {
+ return other.clone();
+ }
+ if other.is_zero() {
+ return self.clone();
+ }
+ let mut m = self.clone();
+ let mut n = other.clone();
+
+ // find common factors of 2
+ let shift = cmp::min(twos(&n), twos(&m));
+
+ // divide m and n by 2 until odd; m is reduced inside the loop below
+ n >>= twos(&n);
+
+ while !m.is_zero() {
+ m >>= twos(&m);
+ if n > m {
+ mem::swap(&mut n, &mut m)
+ }
+ m -= &n;
+ }
+
+ n << shift
+ }
+
+ /// Calculates the Lowest Common Multiple (LCM) of the number and `other`.
+ #[inline]
+ fn lcm(&self, other: &BigUint) -> BigUint {
+ if self.is_zero() && other.is_zero() {
+ Self::zero()
+ } else {
+ self / self.gcd(other) * other
+ }
+ }
+
+ /// Calculates the Greatest Common Divisor (GCD) and
+ /// Lowest Common Multiple (LCM) together.
+ #[inline]
+ fn gcd_lcm(&self, other: &Self) -> (Self, Self) {
+ let gcd = self.gcd(other);
+ let lcm = if gcd.is_zero() {
+ Self::zero()
+ } else {
+ self / &gcd * other
+ };
+ (gcd, lcm)
+ }
+
+ /// Deprecated, use `is_multiple_of` instead.
+ #[inline]
+ fn divides(&self, other: &BigUint) -> bool {
+ self.is_multiple_of(other)
+ }
+
+ /// Returns `true` if the number is a multiple of `other`.
+ #[inline]
+ fn is_multiple_of(&self, other: &BigUint) -> bool {
+ (self % other).is_zero()
+ }
+
+ /// Returns `true` if the number is divisible by `2`.
+ #[inline]
+ fn is_even(&self) -> bool {
+ // Only the least significant digit determines parity.
+ match self.data.first() {
+ Some(x) => x.is_even(),
+ None => true,
+ }
+ }
+
+ /// Returns `true` if the number is not divisible by `2`.
+ #[inline]
+ fn is_odd(&self) -> bool {
+ !self.is_even()
+ }
+
+ /// Rounds up to nearest multiple of argument.
+ #[inline]
+ fn next_multiple_of(&self, other: &Self) -> Self {
+ let m = self.mod_floor(other);
+ if m.is_zero() {
+ self.clone()
+ } else {
+ self + (other - m)
+ }
+ }
+ /// Rounds down to nearest multiple of argument.
+ #[inline]
+ fn prev_multiple_of(&self, other: &Self) -> Self {
+ self - self.mod_floor(other)
+ }
+}
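+
+// Illustrative sketch, not part of the upstream crate, of the Integer impl
+// above: Stein's gcd, the gcd-based lcm, and ceiling division.
+#[cfg(test)]
+mod integer_sketch {
+    use super::BigUint;
+    use num_integer::Integer;
+
+    #[test]
+    fn gcd_lcm_div_ceil() {
+        let a = BigUint::from(12u32);
+        let b = BigUint::from(18u32);
+        assert_eq!(a.gcd(&b), BigUint::from(6u32));
+        assert_eq!(a.lcm(&b), BigUint::from(36u32));
+        assert_eq!(a.div_ceil(&BigUint::from(5u32)), BigUint::from(3u32));
+    }
+}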
+
+#[inline]
+fn fixpoint<F>(mut x: BigUint, max_bits: u64, f: F) -> BigUint
+where
+ F: Fn(&BigUint) -> BigUint,
+{
+ let mut xn = f(&x);
+
+ // If the value increased, then the initial guess must have been low.
+ // Repeat until we reverse course.
+ while x < xn {
+ // Sometimes an increase will go way too far, especially with large
+ // powers, and then take a long time to walk back. We know an upper
+ // bound based on bit size, so saturate on that.
+ x = if xn.bits() > max_bits {
+ BigUint::one() << max_bits
+ } else {
+ xn
+ };
+ xn = f(&x);
+ }
+
+ // Now keep repeating while the estimate is decreasing.
+ while x > xn {
+ x = xn;
+ xn = f(&x);
+ }
+ x
+}
+
+impl Roots for BigUint {
+ // nth_root, sqrt and cbrt use Newton's method to compute
+ // principal root of a given degree for a given integer.
+
+ // Reference:
+ // Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.14
+ fn nth_root(&self, n: u32) -> Self {
+ assert!(n > 0, "root degree n must be at least 1");
+
+ if self.is_zero() || self.is_one() {
+ return self.clone();
+ }
+
+ match n {
+ // Optimize for small n
+ 1 => return self.clone(),
+ 2 => return self.sqrt(),
+ 3 => return self.cbrt(),
+ _ => (),
+ }
+
+ // The root of non-zero values less than 2ⁿ can only be 1.
+ let bits = self.bits();
+ let n64 = u64::from(n);
+ if bits <= n64 {
+ return BigUint::one();
+ }
+
+ // If we fit in `u64`, compute the root that way.
+ if let Some(x) = self.to_u64() {
+ return x.nth_root(n).into();
+ }
+
+ let max_bits = bits / n64 + 1;
+
+ #[cfg(feature = "std")]
+ let guess = match self.to_f64() {
+ Some(f) if f.is_finite() => {
+ use num_traits::FromPrimitive;
+
+ // We fit in `f64` (lossy), so get a better initial guess from that.
+ BigUint::from_f64((f.ln() / f64::from(n)).exp()).unwrap()
+ }
+ _ => {
+ // Try to guess by scaling down such that it does fit in `f64`.
+ // With some (x * 2ⁿᵏ), its nth root ≈ (ⁿ√x * 2ᵏ)
+ let extra_bits = bits - (core::f64::MAX_EXP as u64 - 1);
+ let root_scale = extra_bits.div_ceil(&n64);
+ let scale = root_scale * n64;
+ if scale < bits && bits - scale > n64 {
+ (self >> scale).nth_root(n) << root_scale
+ } else {
+ BigUint::one() << max_bits
+ }
+ }
+ };
+
+ #[cfg(not(feature = "std"))]
+ let guess = BigUint::one() << max_bits;
+
+ let n_min_1 = n - 1;
+ fixpoint(guess, max_bits, move |s| {
+ let q = self / s.pow(n_min_1);
+ let t = n_min_1 * s + q;
+ t / n
+ })
+ }
+
+ // Reference:
+ // Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.13
+ fn sqrt(&self) -> Self {
+ if self.is_zero() || self.is_one() {
+ return self.clone();
+ }
+
+ // If we fit in `u64`, compute the root that way.
+ if let Some(x) = self.to_u64() {
+ return x.sqrt().into();
+ }
+
+ let bits = self.bits();
+ let max_bits = bits / 2 + 1;
+
+ #[cfg(feature = "std")]
+ let guess = match self.to_f64() {
+ Some(f) if f.is_finite() => {
+ use num_traits::FromPrimitive;
+
+ // We fit in `f64` (lossy), so get a better initial guess from that.
+ BigUint::from_f64(f.sqrt()).unwrap()
+ }
+ _ => {
+ // Try to guess by scaling down such that it does fit in `f64`.
+ // With some (x * 2²ᵏ), its sqrt ≈ (√x * 2ᵏ)
+ let extra_bits = bits - (core::f64::MAX_EXP as u64 - 1);
+ let root_scale = (extra_bits + 1) / 2;
+ let scale = root_scale * 2;
+ (self >> scale).sqrt() << root_scale
+ }
+ };
+
+ #[cfg(not(feature = "std"))]
+ let guess = BigUint::one() << max_bits;
+
+ fixpoint(guess, max_bits, move |s| {
+ let q = self / s;
+ let t = s + q;
+ t >> 1
+ })
+ }
+
+ fn cbrt(&self) -> Self {
+ if self.is_zero() || self.is_one() {
+ return self.clone();
+ }
+
+ // If we fit in `u64`, compute the root that way.
+ if let Some(x) = self.to_u64() {
+ return x.cbrt().into();
+ }
+
+ let bits = self.bits();
+ let max_bits = bits / 3 + 1;
+
+ #[cfg(feature = "std")]
+ let guess = match self.to_f64() {
+ Some(f) if f.is_finite() => {
+ use num_traits::FromPrimitive;
+
+ // We fit in `f64` (lossy), so get a better initial guess from that.
+ BigUint::from_f64(f.cbrt()).unwrap()
+ }
+ _ => {
+ // Try to guess by scaling down such that it does fit in `f64`.
+ // With some (x * 2³ᵏ), its cbrt ≈ (∛x * 2ᵏ)
+ let extra_bits = bits - (core::f64::MAX_EXP as u64 - 1);
+ let root_scale = (extra_bits + 2) / 3;
+ let scale = root_scale * 3;
+ (self >> scale).cbrt() << root_scale
+ }
+ };
+
+ #[cfg(not(feature = "std"))]
+ let guess = BigUint::one() << max_bits;
+
+ fixpoint(guess, max_bits, move |s| {
+ let q = self / (s * s);
+ let t = (s << 1) + q;
+ t / 3u32
+ })
+ }
+}
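+
+// Illustrative sketch, not part of the upstream crate: the Newton iterations
+// above return the floor of the principal root.
+#[cfg(test)]
+mod roots_sketch {
+    use super::BigUint;
+    use num_integer::Roots;
+
+    #[test]
+    fn floor_roots() {
+        let n = BigUint::from(1_000_000u64);
+        assert_eq!(n.sqrt(), BigUint::from(1_000u32));
+        assert_eq!(n.cbrt(), BigUint::from(100u32));
+        assert_eq!(BigUint::from(10u32).nth_root(2), BigUint::from(3u32));
+    }
+}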
+
+/// A generic trait for converting a value to a `BigUint`.
+pub trait ToBigUint {
+ /// Converts the value of `self` to a `BigUint`.
+ fn to_biguint(&self) -> Option<BigUint>;
+}
+
+/// Creates and initializes a `BigUint`.
+///
+/// The digits are in little-endian base matching `BigDigit`.
+#[inline]
+pub(crate) fn biguint_from_vec(digits: Vec<BigDigit>) -> BigUint {
+ BigUint { data: digits }.normalized()
+}
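+
+// Illustrative sketch, not part of the upstream crate: normalization strips
+// trailing zero digits, which is the invariant the Hash/PartialEq
+// debug_asserts above rely on so that equal values always compare equal.
+#[cfg(test)]
+mod normalize_sketch {
+    use super::BigUint;
+
+    #[test]
+    fn trailing_zero_digits_are_stripped() {
+        assert_eq!(BigUint::new(vec![5, 0, 0]), BigUint::new(vec![5]));
+        assert_eq!(BigUint::new(vec![]), BigUint::new(vec![0]));
+    }
+}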
+
+impl BigUint {
+ /// Creates and initializes a `BigUint`.
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn new(digits: Vec<u32>) -> BigUint {
+ let mut big = BigUint::zero();
+
+ #[cfg(not(u64_digit))]
+ {
+ big.data = digits;
+ big.normalize();
+ }
+
+ #[cfg(u64_digit)]
+ big.assign_from_slice(&digits);
+
+ big
+ }
+
+ /// Creates and initializes a `BigUint`.
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn from_slice(slice: &[u32]) -> BigUint {
+ let mut big = BigUint::zero();
+ big.assign_from_slice(slice);
+ big
+ }
+
+ /// Assign a value to a `BigUint`.
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn assign_from_slice(&mut self, slice: &[u32]) {
+ self.data.clear();
+
+ #[cfg(not(u64_digit))]
+ self.data.extend_from_slice(slice);
+
+ #[cfg(u64_digit)]
+ self.data.extend(slice.chunks(2).map(u32_chunk_to_u64));
+
+ self.normalize();
+ }
+
+ /// Creates and initializes a `BigUint`.
+ ///
+ /// The bytes are in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from_bytes_be(b"A"),
+ /// BigUint::parse_bytes(b"65", 10).unwrap());
+ /// assert_eq!(BigUint::from_bytes_be(b"AA"),
+ /// BigUint::parse_bytes(b"16705", 10).unwrap());
+ /// assert_eq!(BigUint::from_bytes_be(b"AB"),
+ /// BigUint::parse_bytes(b"16706", 10).unwrap());
+ /// assert_eq!(BigUint::from_bytes_be(b"Hello world!"),
+ /// BigUint::parse_bytes(b"22405534230753963835153736737", 10).unwrap());
+ /// ```
+ #[inline]
+ pub fn from_bytes_be(bytes: &[u8]) -> BigUint {
+ if bytes.is_empty() {
+ Zero::zero()
+ } else {
+ let mut v = bytes.to_vec();
+ v.reverse();
+ BigUint::from_bytes_le(&*v)
+ }
+ }
+
+ /// Creates and initializes a `BigUint`.
+ ///
+ /// The bytes are in little-endian byte order.
+ #[inline]
+ pub fn from_bytes_le(bytes: &[u8]) -> BigUint {
+ if bytes.is_empty() {
+ Zero::zero()
+ } else {
+ convert::from_bitwise_digits_le(bytes, 8)
+ }
+ }
+
+ /// Creates and initializes a `BigUint`. The input slice must contain
+ /// ASCII characters in [0-9a-zA-Z].
+ /// `radix` must be in the range `2...36`.
+ ///
+ /// The function `from_str_radix` from the `Num` trait provides the same logic
+ /// for `&str` buffers.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigUint, ToBigUint};
+ ///
+ /// assert_eq!(BigUint::parse_bytes(b"1234", 10), ToBigUint::to_biguint(&1234));
+ /// assert_eq!(BigUint::parse_bytes(b"ABCD", 16), ToBigUint::to_biguint(&0xABCD));
+ /// assert_eq!(BigUint::parse_bytes(b"G", 16), None);
+ /// ```
+ #[inline]
+ pub fn parse_bytes(buf: &[u8], radix: u32) -> Option<BigUint> {
+ let s = str::from_utf8(buf).ok()?;
+ BigUint::from_str_radix(s, radix).ok()
+ }
+
+ /// Creates and initializes a `BigUint`. Each u8 of the input slice is
+ /// interpreted as one digit of the number
+ /// and must therefore be less than `radix`.
+ ///
+ /// The bytes are in big-endian byte order.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigUint};
+ ///
+ /// let inbase190 = &[15, 33, 125, 12, 14];
+ /// let a = BigUint::from_radix_be(inbase190, 190).unwrap();
+ /// assert_eq!(a.to_radix_be(190), inbase190);
+ /// ```
+ pub fn from_radix_be(buf: &[u8], radix: u32) -> Option<BigUint> {
+ convert::from_radix_be(buf, radix)
+ }
+
+ /// Creates and initializes a `BigUint`. Each u8 of the input slice is
+ /// interpreted as one digit of the number
+ /// and must therefore be less than `radix`.
+ ///
+ /// The bytes are in little-endian byte order.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigUint};
+ ///
+ /// let inbase190 = &[14, 12, 125, 33, 15];
+ /// let a = BigUint::from_radix_le(inbase190, 190).unwrap();
+ /// assert_eq!(a.to_radix_le(190), inbase190);
+ /// ```
+ pub fn from_radix_le(buf: &[u8], radix: u32) -> Option<BigUint> {
+ convert::from_radix_le(buf, radix)
+ }
+
+ /// Returns the byte representation of the `BigUint` in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// let i = BigUint::parse_bytes(b"1125", 10).unwrap();
+ /// assert_eq!(i.to_bytes_be(), vec![4, 101]);
+ /// ```
+ #[inline]
+ pub fn to_bytes_be(&self) -> Vec<u8> {
+ let mut v = self.to_bytes_le();
+ v.reverse();
+ v
+ }
+
+ /// Returns the byte representation of the `BigUint` in little-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// let i = BigUint::parse_bytes(b"1125", 10).unwrap();
+ /// assert_eq!(i.to_bytes_le(), vec![101, 4]);
+ /// ```
+ #[inline]
+ pub fn to_bytes_le(&self) -> Vec<u8> {
+ if self.is_zero() {
+ vec![0]
+ } else {
+ convert::to_bitwise_digits_le(self, 8)
+ }
+ }
+
+ /// Returns the `u32` digits representation of the `BigUint` ordered least significant digit
+ /// first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(1125u32).to_u32_digits(), vec![1125]);
+ /// assert_eq!(BigUint::from(4294967295u32).to_u32_digits(), vec![4294967295]);
+ /// assert_eq!(BigUint::from(4294967296u64).to_u32_digits(), vec![0, 1]);
+ /// assert_eq!(BigUint::from(112500000000u64).to_u32_digits(), vec![830850304, 26]);
+ /// ```
+ #[inline]
+ pub fn to_u32_digits(&self) -> Vec<u32> {
+ self.iter_u32_digits().collect()
+ }
+
+ /// Returns the `u64` digits representation of the `BigUint` ordered least significant digit
+ /// first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(1125u32).to_u64_digits(), vec![1125]);
+ /// assert_eq!(BigUint::from(4294967295u32).to_u64_digits(), vec![4294967295]);
+ /// assert_eq!(BigUint::from(4294967296u64).to_u64_digits(), vec![4294967296]);
+ /// assert_eq!(BigUint::from(112500000000u64).to_u64_digits(), vec![112500000000]);
+ /// assert_eq!(BigUint::from(1u128 << 64).to_u64_digits(), vec![0, 1]);
+ /// ```
+ #[inline]
+ pub fn to_u64_digits(&self) -> Vec<u64> {
+ self.iter_u64_digits().collect()
+ }
+
+ /// Returns an iterator of `u32` digits representation of the `BigUint` ordered least
+ /// significant digit first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(1125u32).iter_u32_digits().collect::<Vec<u32>>(), vec![1125]);
+ /// assert_eq!(BigUint::from(4294967295u32).iter_u32_digits().collect::<Vec<u32>>(), vec![4294967295]);
+ /// assert_eq!(BigUint::from(4294967296u64).iter_u32_digits().collect::<Vec<u32>>(), vec![0, 1]);
+ /// assert_eq!(BigUint::from(112500000000u64).iter_u32_digits().collect::<Vec<u32>>(), vec![830850304, 26]);
+ /// ```
+ #[inline]
+ pub fn iter_u32_digits(&self) -> U32Digits<'_> {
+ U32Digits::new(self.data.as_slice())
+ }
+
+ /// Returns an iterator of `u64` digits representation of the `BigUint` ordered least
+ /// significant digit first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(1125u32).iter_u64_digits().collect::<Vec<u64>>(), vec![1125]);
+ /// assert_eq!(BigUint::from(4294967295u32).iter_u64_digits().collect::<Vec<u64>>(), vec![4294967295]);
+ /// assert_eq!(BigUint::from(4294967296u64).iter_u64_digits().collect::<Vec<u64>>(), vec![4294967296]);
+ /// assert_eq!(BigUint::from(112500000000u64).iter_u64_digits().collect::<Vec<u64>>(), vec![112500000000]);
+ /// assert_eq!(BigUint::from(1u128 << 64).iter_u64_digits().collect::<Vec<u64>>(), vec![0, 1]);
+ /// ```
+ #[inline]
+ pub fn iter_u64_digits(&self) -> U64Digits<'_> {
+ U64Digits::new(self.data.as_slice())
+ }
+
+ /// Returns the integer formatted as a string in the given radix.
+ /// `radix` must be in the range `2...36`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// let i = BigUint::parse_bytes(b"ff", 16).unwrap();
+ /// assert_eq!(i.to_str_radix(16), "ff");
+ /// ```
+ #[inline]
+ pub fn to_str_radix(&self, radix: u32) -> String {
+ let mut v = to_str_radix_reversed(self, radix);
+ v.reverse();
+ unsafe { String::from_utf8_unchecked(v) }
+ }
+
+ /// Returns the integer in the requested base in big-endian digit order.
+ /// The output is not given in a human-readable alphabet but as zero-based
+ /// `u8` digit values.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(0xFFFFu64).to_radix_be(159),
+ /// vec![2, 94, 27]);
+ /// // 0xFFFF = 65535 = 2*(159^2) + 94*159 + 27
+ /// ```
+ #[inline]
+ pub fn to_radix_be(&self, radix: u32) -> Vec<u8> {
+ let mut v = convert::to_radix_le(self, radix);
+ v.reverse();
+ v
+ }
+
+ /// Returns the integer in the requested base in little-endian digit order.
+ /// The output is not given in a human-readable alphabet but as zero-based
+ /// `u8` digit values.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(0xFFFFu64).to_radix_le(159),
+ /// vec![27, 94, 2]);
+ /// // 0xFFFF = 65535 = 27 + 94*159 + 2*(159^2)
+ /// ```
+ #[inline]
+ pub fn to_radix_le(&self, radix: u32) -> Vec<u8> {
+ convert::to_radix_le(self, radix)
+ }
+
+ /// Determines the fewest bits necessary to express the `BigUint`.
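+ ///
+ /// # Examples
+ ///
+ /// A brief illustrative sketch (values chosen only for demonstration):
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// // zero needs no bits; 255 fits in 8 bits; 256 needs a ninth
+ /// assert_eq!(BigUint::from(0u32).bits(), 0);
+ /// assert_eq!(BigUint::from(255u32).bits(), 8);
+ /// assert_eq!(BigUint::from(256u32).bits(), 9);
+ /// ```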
+ #[inline]
+ pub fn bits(&self) -> u64 {
+ if self.is_zero() {
+ return 0;
+ }
+ let zeros: u64 = self.data.last().unwrap().leading_zeros().into();
+ self.data.len() as u64 * u64::from(big_digit::BITS) - zeros
+ }
+
+ /// Strips off trailing zero bigdigits - comparisons require the last element in the vector to
+ /// be nonzero.
+ #[inline]
+ fn normalize(&mut self) {
+ while let Some(&0) = self.data.last() {
+ self.data.pop();
+ }
+ if self.data.len() < self.data.capacity() / 4 {
+ self.data.shrink_to_fit();
+ }
+ }
+
+ /// Returns a normalized `BigUint`.
+ #[inline]
+ fn normalized(mut self) -> BigUint {
+ self.normalize();
+ self
+ }
+
+ /// Returns `self ^ exponent`.
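+ ///
+ /// # Examples
+ ///
+ /// A short usage sketch (values chosen only for illustration):
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(3u32).pow(4), BigUint::from(81u32));
+ /// ```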
+ pub fn pow(&self, exponent: u32) -> Self {
+ Pow::pow(self, exponent)
+ }
+
+ /// Returns `(self ^ exponent) % modulus`.
+ ///
+ /// Panics if the modulus is zero.
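+ ///
+ /// # Examples
+ ///
+ /// A small sketch of modular exponentiation (arbitrary example values):
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// // 4^13 mod 497 == 445
+ /// let base = BigUint::from(4u32);
+ /// let exponent = BigUint::from(13u32);
+ /// let modulus = BigUint::from(497u32);
+ /// assert_eq!(base.modpow(&exponent, &modulus), BigUint::from(445u32));
+ /// ```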
+ pub fn modpow(&self, exponent: &Self, modulus: &Self) -> Self {
+ power::modpow(self, exponent, modulus)
+ }
+
+ /// Returns the truncated principal square root of `self` --
+ /// see [Roots::sqrt](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#method.sqrt)
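+ ///
+ /// # Examples
+ ///
+ /// A brief illustration of the truncating behavior:
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// // 3*3 = 9 <= 10 < 16 = 4*4, so the truncated square root of 10 is 3
+ /// assert_eq!(BigUint::from(10u32).sqrt(), BigUint::from(3u32));
+ /// ```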
+ pub fn sqrt(&self) -> Self {
+ Roots::sqrt(self)
+ }
+
+ /// Returns the truncated principal cube root of `self` --
+ /// see [Roots::cbrt](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#method.cbrt).
+ pub fn cbrt(&self) -> Self {
+ Roots::cbrt(self)
+ }
+
+ /// Returns the truncated principal `n`th root of `self` --
+ /// see [Roots::nth_root](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#tymethod.nth_root).
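+ ///
+ /// # Examples
+ ///
+ /// A short sketch (values chosen only for illustration):
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// // 3^5 == 243, so the fifth root of 243 is exactly 3
+ /// assert_eq!(BigUint::from(243u32).nth_root(5), BigUint::from(3u32));
+ /// ```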
+ pub fn nth_root(&self, n: u32) -> Self {
+ Roots::nth_root(self, n)
+ }
+
+ /// Returns the number of least-significant bits that are zero,
+ /// or `None` if the entire number is zero.
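+ ///
+ /// # Examples
+ ///
+ /// For instance (illustrative values):
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(8u32).trailing_zeros(), Some(3)); // 0b1000
+ /// assert_eq!(BigUint::from(0u32).trailing_zeros(), None);
+ /// ```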
+ pub fn trailing_zeros(&self) -> Option<u64> {
+ let i = self.data.iter().position(|&digit| digit != 0)?;
+ let zeros: u64 = self.data[i].trailing_zeros().into();
+ Some(i as u64 * u64::from(big_digit::BITS) + zeros)
+ }
+
+ /// Returns the number of least-significant bits that are ones.
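+ ///
+ /// # Examples
+ ///
+ /// For instance (illustrative values):
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(7u32).trailing_ones(), 3); // 0b111
+ /// assert_eq!(BigUint::from(0u32).trailing_ones(), 0);
+ /// ```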
+ pub fn trailing_ones(&self) -> u64 {
+ if let Some(i) = self.data.iter().position(|&digit| !digit != 0) {
+ // XXX u64::trailing_ones() introduced in Rust 1.46,
+ // but we need to be compatible further back.
+ // Thanks to cuviper for this workaround.
+ let ones: u64 = (!self.data[i]).trailing_zeros().into();
+ i as u64 * u64::from(big_digit::BITS) + ones
+ } else {
+ self.data.len() as u64 * u64::from(big_digit::BITS)
+ }
+ }
+
+ /// Returns the number of one bits.
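+ ///
+ /// # Examples
+ ///
+ /// For instance:
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(0b1011u32).count_ones(), 3);
+ /// ```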
+ pub fn count_ones(&self) -> u64 {
+ self.data.iter().map(|&d| u64::from(d.count_ones())).sum()
+ }
+
+ /// Returns whether the bit in the given position is set.
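+ ///
+ /// # Examples
+ ///
+ /// An illustrative sketch; bits beyond the stored digits read as zero:
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// let n = BigUint::from(0b101u32);
+ /// assert!(n.bit(0));
+ /// assert!(!n.bit(1));
+ /// assert!(n.bit(2));
+ /// assert!(!n.bit(1000));
+ /// ```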
+ pub fn bit(&self, bit: u64) -> bool {
+ let bits_per_digit = u64::from(big_digit::BITS);
+ if let Some(digit_index) = (bit / bits_per_digit).to_usize() {
+ if let Some(digit) = self.data.get(digit_index) {
+ let bit_mask = (1 as BigDigit) << (bit % bits_per_digit);
+ return (digit & bit_mask) != 0;
+ }
+ }
+ false
+ }
+
+ /// Sets or clears the bit in the given position.
+ ///
+ /// Note that when setting a bit beyond the current bit length, a reallocation
+ /// may be needed to store the new digits.
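+ ///
+ /// # Examples
+ ///
+ /// A small usage sketch (values chosen only for illustration):
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// let mut n = BigUint::from(0b100u32);
+ /// n.set_bit(0, true);
+ /// assert_eq!(n, BigUint::from(0b101u32));
+ /// n.set_bit(2, false);
+ /// assert_eq!(n, BigUint::from(1u32));
+ /// ```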
+ pub fn set_bit(&mut self, bit: u64, value: bool) {
+ // Note: we're saturating `digit_index` and `new_len` -- any such case is guaranteed to
+ // fail allocation, and that's more consistent than adding our own overflow panics.
+ let bits_per_digit = u64::from(big_digit::BITS);
+ let digit_index = (bit / bits_per_digit)
+ .to_usize()
+ .unwrap_or(core::usize::MAX);
+ let bit_mask = (1 as BigDigit) << (bit % bits_per_digit);
+ if value {
+ if digit_index >= self.data.len() {
+ let new_len = digit_index.saturating_add(1);
+ self.data.resize(new_len, 0);
+ }
+ self.data[digit_index] |= bit_mask;
+ } else if digit_index < self.data.len() {
+ self.data[digit_index] &= !bit_mask;
+ // the top bit may have been cleared, so normalize
+ self.normalize();
+ }
+ }
+}
+
+pub(crate) trait IntDigits {
+ fn digits(&self) -> &[BigDigit];
+ fn digits_mut(&mut self) -> &mut Vec<BigDigit>;
+ fn normalize(&mut self);
+ fn capacity(&self) -> usize;
+ fn len(&self) -> usize;
+}
+
+impl IntDigits for BigUint {
+ #[inline]
+ fn digits(&self) -> &[BigDigit] {
+ &self.data
+ }
+ #[inline]
+ fn digits_mut(&mut self) -> &mut Vec<BigDigit> {
+ &mut self.data
+ }
+ #[inline]
+ fn normalize(&mut self) {
+ self.normalize();
+ }
+ #[inline]
+ fn capacity(&self) -> usize {
+ self.data.capacity()
+ }
+ #[inline]
+ fn len(&self) -> usize {
+ self.data.len()
+ }
+}
+
+/// Convert a u32 chunk (len is either 1 or 2) to a single u64 digit
+#[inline]
+fn u32_chunk_to_u64(chunk: &[u32]) -> u64 {
+ // the source slice may have odd length, so the final chunk can hold a single u32
+ let mut digit = chunk[0] as u64;
+ if let Some(&hi) = chunk.get(1) {
+ digit |= (hi as u64) << 32;
+ }
+ digit
+}
+
+/// Combine four `u32`s into a single `u128`.
+#[cfg(any(test, not(u64_digit)))]
+#[inline]
+fn u32_to_u128(a: u32, b: u32, c: u32, d: u32) -> u128 {
+ u128::from(d) | (u128::from(c) << 32) | (u128::from(b) << 64) | (u128::from(a) << 96)
+}
+
+/// Split a single `u128` into four `u32`.
+#[cfg(any(test, not(u64_digit)))]
+#[inline]
+fn u32_from_u128(n: u128) -> (u32, u32, u32, u32) {
+ (
+ (n >> 96) as u32,
+ (n >> 64) as u32,
+ (n >> 32) as u32,
+ n as u32,
+ )
+}
+
+#[cfg(not(u64_digit))]
+#[test]
+fn test_from_slice() {
+ fn check(slice: &[u32], data: &[BigDigit]) {
+ assert_eq!(BigUint::from_slice(slice).data, data);
+ }
+ check(&[1], &[1]);
+ check(&[0, 0, 0], &[]);
+ check(&[1, 2, 0, 0], &[1, 2]);
+ check(&[0, 0, 1, 2], &[0, 0, 1, 2]);
+ check(&[0, 0, 1, 2, 0, 0], &[0, 0, 1, 2]);
+ check(&[-1i32 as u32], &[-1i32 as BigDigit]);
+}
+
+#[cfg(u64_digit)]
+#[test]
+fn test_from_slice() {
+ fn check(slice: &[u32], data: &[BigDigit]) {
+ assert_eq!(
+ BigUint::from_slice(slice).data,
+ data,
+ "from {:?}, to {:?}",
+ slice,
+ data
+ );
+ }
+ check(&[1], &[1]);
+ check(&[0, 0, 0], &[]);
+ check(&[1, 2], &[8_589_934_593]);
+ check(&[1, 2, 0, 0], &[8_589_934_593]);
+ check(&[0, 0, 1, 2], &[0, 8_589_934_593]);
+ check(&[0, 0, 1, 2, 0, 0], &[0, 8_589_934_593]);
+ check(&[-1i32 as u32], &[(-1i32 as u32) as BigDigit]);
+}
+
+#[test]
+fn test_u32_u128() {
+ assert_eq!(u32_from_u128(0u128), (0, 0, 0, 0));
+ assert_eq!(
+ u32_from_u128(u128::max_value()),
+ (
+ u32::max_value(),
+ u32::max_value(),
+ u32::max_value(),
+ u32::max_value()
+ )
+ );
+
+ assert_eq!(
+ u32_from_u128(u32::max_value() as u128),
+ (0, 0, 0, u32::max_value())
+ );
+
+ assert_eq!(
+ u32_from_u128(u64::max_value() as u128),
+ (0, 0, u32::max_value(), u32::max_value())
+ );
+
+ assert_eq!(
+ u32_from_u128((u64::max_value() as u128) + u32::max_value() as u128),
+ (0, 1, 0, u32::max_value() - 1)
+ );
+
+ assert_eq!(u32_from_u128(36_893_488_151_714_070_528), (0, 2, 1, 0));
+}
+
+#[test]
+fn test_u128_u32_roundtrip() {
+ // roundtrips
+ let values = vec![
+ 0u128,
+ 1u128,
+ u64::max_value() as u128 * 3,
+ u32::max_value() as u128,
+ u64::max_value() as u128,
+ (u64::max_value() as u128) + u32::max_value() as u128,
+ u128::max_value(),
+ ];
+
+ for val in &values {
+ let (a, b, c, d) = u32_from_u128(*val);
+ assert_eq!(u32_to_u128(a, b, c, d), *val);
+ }
+}
diff --git a/src/biguint/addition.rs b/src/biguint/addition.rs
new file mode 100644
index 0000000..e54f8cb
--- /dev/null
+++ b/src/biguint/addition.rs
@@ -0,0 +1,254 @@
+#[cfg(not(u64_digit))]
+use super::u32_from_u128;
+use super::{BigUint, IntDigits};
+
+use crate::big_digit::{self, BigDigit};
+use crate::UsizePromotion;
+
+use core::iter::Sum;
+use core::ops::{Add, AddAssign};
+use num_traits::{CheckedAdd, Zero};
+
+#[cfg(all(use_addcarry, target_arch = "x86_64"))]
+use core::arch::x86_64 as arch;
+
+#[cfg(all(use_addcarry, target_arch = "x86"))]
+use core::arch::x86 as arch;
+
+// Add with carry:
+#[cfg(all(use_addcarry, u64_digit))]
+#[inline]
+fn adc(carry: u8, a: u64, b: u64, out: &mut u64) -> u8 {
+ // Safety: There are absolutely no safety concerns with calling `_addcarry_u64`.
+ // It's just unsafe for API consistency with other intrinsics.
+ unsafe { arch::_addcarry_u64(carry, a, b, out) }
+}
+
+#[cfg(all(use_addcarry, not(u64_digit)))]
+#[inline]
+fn adc(carry: u8, a: u32, b: u32, out: &mut u32) -> u8 {
+ // Safety: There are absolutely no safety concerns with calling `_addcarry_u32`.
+ // It's just unsafe for API consistency with other intrinsics.
+ unsafe { arch::_addcarry_u32(carry, a, b, out) }
+}
+
+// fallback for environments where we don't have an addcarry intrinsic
+#[cfg(not(use_addcarry))]
+#[inline]
+fn adc(carry: u8, a: BigDigit, b: BigDigit, out: &mut BigDigit) -> u8 {
+ use crate::big_digit::DoubleBigDigit;
+
+ let sum = DoubleBigDigit::from(a) + DoubleBigDigit::from(b) + DoubleBigDigit::from(carry);
+ *out = sum as BigDigit;
+ (sum >> big_digit::BITS) as u8
+}
+
+/// Two argument addition of raw slices, `a += b`, returning the carry.
+///
+/// This is used when the data `Vec` might need to resize to push a non-zero carry, so we perform
+/// the addition first hoping that it will fit.
+///
+/// The caller _must_ ensure that `a` is at least as long as `b`.
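+///
+/// For illustration, assuming 64-bit digits: adding `b = [1]` into
+/// `a = [u64::MAX, 7]` leaves `a = [0, 8]` and returns a carry of `0`.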
+#[inline]
+pub(super) fn __add2(a: &mut [BigDigit], b: &[BigDigit]) -> BigDigit {
+ debug_assert!(a.len() >= b.len());
+
+ let mut carry = 0;
+ let (a_lo, a_hi) = a.split_at_mut(b.len());
+
+ for (a, b) in a_lo.iter_mut().zip(b) {
+ carry = adc(carry, *a, *b, a);
+ }
+
+ if carry != 0 {
+ for a in a_hi {
+ carry = adc(carry, *a, 0, a);
+ if carry == 0 {
+ break;
+ }
+ }
+ }
+
+ carry as BigDigit
+}
+
+/// Two argument addition of raw slices:
+/// a += b
+///
+/// The caller _must_ ensure that a is big enough to store the result - typically this means
+/// resizing a to max(a.len(), b.len()) + 1, to fit a possible carry.
+pub(super) fn add2(a: &mut [BigDigit], b: &[BigDigit]) {
+ let carry = __add2(a, b);
+
+ debug_assert!(carry == 0);
+}
+
+forward_all_binop_to_val_ref_commutative!(impl Add for BigUint, add);
+forward_val_assign!(impl AddAssign for BigUint, add_assign);
+
+impl<'a> Add<&'a BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn add(mut self, other: &BigUint) -> BigUint {
+ self += other;
+ self
+ }
+}
+impl<'a> AddAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn add_assign(&mut self, other: &BigUint) {
+ let self_len = self.data.len();
+ let carry = if self_len < other.data.len() {
+ let lo_carry = __add2(&mut self.data[..], &other.data[..self_len]);
+ self.data.extend_from_slice(&other.data[self_len..]);
+ __add2(&mut self.data[self_len..], &[lo_carry])
+ } else {
+ __add2(&mut self.data[..], &other.data[..])
+ };
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+}
+
+promote_unsigned_scalars!(impl Add for BigUint, add);
+promote_unsigned_scalars_assign!(impl AddAssign for BigUint, add_assign);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u32> for BigUint, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u64> for BigUint, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u128> for BigUint, add);
+
+impl Add<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn add(mut self, other: u32) -> BigUint {
+ self += other;
+ self
+ }
+}
+
+impl AddAssign<u32> for BigUint {
+ #[inline]
+ fn add_assign(&mut self, other: u32) {
+ if other != 0 {
+ if self.data.is_empty() {
+ self.data.push(0);
+ }
+
+ let carry = __add2(&mut self.data, &[other as BigDigit]);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
+
+impl Add<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn add(mut self, other: u64) -> BigUint {
+ self += other;
+ self
+ }
+}
+
+impl AddAssign<u64> for BigUint {
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn add_assign(&mut self, other: u64) {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ if hi == 0 {
+ *self += lo;
+ } else {
+ while self.data.len() < 2 {
+ self.data.push(0);
+ }
+
+ let carry = __add2(&mut self.data, &[lo, hi]);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn add_assign(&mut self, other: u64) {
+ if other != 0 {
+ if self.data.is_empty() {
+ self.data.push(0);
+ }
+
+ let carry = __add2(&mut self.data, &[other as BigDigit]);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
+
+impl Add<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn add(mut self, other: u128) -> BigUint {
+ self += other;
+ self
+ }
+}
+
+impl AddAssign<u128> for BigUint {
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn add_assign(&mut self, other: u128) {
+ if other <= u128::from(u64::max_value()) {
+ *self += other as u64
+ } else {
+ let (a, b, c, d) = u32_from_u128(other);
+ let carry = if a > 0 {
+ while self.data.len() < 4 {
+ self.data.push(0);
+ }
+ __add2(&mut self.data, &[d, c, b, a])
+ } else {
+ debug_assert!(b > 0);
+ while self.data.len() < 3 {
+ self.data.push(0);
+ }
+ __add2(&mut self.data, &[d, c, b])
+ };
+
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn add_assign(&mut self, other: u128) {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ if hi == 0 {
+ *self += lo;
+ } else {
+ while self.data.len() < 2 {
+ self.data.push(0);
+ }
+
+ let carry = __add2(&mut self.data, &[lo, hi]);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
+
+impl CheckedAdd for BigUint {
+ #[inline]
+ fn checked_add(&self, v: &BigUint) -> Option<BigUint> {
+ Some(self.add(v))
+ }
+}
+
+impl_sum_iter_type!(BigUint);
diff --git a/src/biguint/arbitrary.rs b/src/biguint/arbitrary.rs
new file mode 100644
index 0000000..6fa91c0
--- /dev/null
+++ b/src/biguint/arbitrary.rs
@@ -0,0 +1,34 @@
+use super::{biguint_from_vec, BigUint};
+
+use crate::big_digit::BigDigit;
+#[cfg(feature = "quickcheck")]
+use crate::std_alloc::Box;
+use crate::std_alloc::Vec;
+
+#[cfg(feature = "quickcheck")]
+impl quickcheck::Arbitrary for BigUint {
+ fn arbitrary(g: &mut quickcheck::Gen) -> Self {
+ // Use arbitrary from Vec
+ biguint_from_vec(Vec::<BigDigit>::arbitrary(g))
+ }
+
+ fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
+ // Use shrinker from Vec
+ Box::new(self.data.shrink().map(biguint_from_vec))
+ }
+}
+
+#[cfg(feature = "arbitrary")]
+impl arbitrary::Arbitrary<'_> for BigUint {
+ fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
+ Ok(biguint_from_vec(Vec::<BigDigit>::arbitrary(u)?))
+ }
+
+ fn arbitrary_take_rest(u: arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
+ Ok(biguint_from_vec(Vec::<BigDigit>::arbitrary_take_rest(u)?))
+ }
+
+ fn size_hint(depth: usize) -> (usize, Option<usize>) {
+ Vec::<BigDigit>::size_hint(depth)
+ }
+}
diff --git a/src/biguint/bits.rs b/src/biguint/bits.rs
new file mode 100644
index 0000000..58c755a
--- /dev/null
+++ b/src/biguint/bits.rs
@@ -0,0 +1,93 @@
+use super::{BigUint, IntDigits};
+
+use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign};
+
+forward_val_val_binop!(impl BitAnd for BigUint, bitand);
+forward_ref_val_binop!(impl BitAnd for BigUint, bitand);
+
+// do not use forward_ref_ref_binop_commutative! for bitand so that we can
+// clone the smaller value rather than the larger, avoiding over-allocation
+impl<'a, 'b> BitAnd<&'b BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn bitand(self, other: &BigUint) -> BigUint {
+ // forward to val-ref, choosing the smaller to clone
+ if self.data.len() <= other.data.len() {
+ self.clone() & other
+ } else {
+ other.clone() & self
+ }
+ }
+}
+
+forward_val_assign!(impl BitAndAssign for BigUint, bitand_assign);
+
+impl<'a> BitAnd<&'a BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn bitand(mut self, other: &BigUint) -> BigUint {
+ self &= other;
+ self
+ }
+}
+impl<'a> BitAndAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn bitand_assign(&mut self, other: &BigUint) {
+ for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
+ *ai &= bi;
+ }
+ self.data.truncate(other.data.len());
+ self.normalize();
+ }
+}
+
+forward_all_binop_to_val_ref_commutative!(impl BitOr for BigUint, bitor);
+forward_val_assign!(impl BitOrAssign for BigUint, bitor_assign);
+
+impl<'a> BitOr<&'a BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn bitor(mut self, other: &BigUint) -> BigUint {
+ self |= other;
+ self
+ }
+}
+impl<'a> BitOrAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn bitor_assign(&mut self, other: &BigUint) {
+ for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
+ *ai |= bi;
+ }
+ if other.data.len() > self.data.len() {
+ let extra = &other.data[self.data.len()..];
+ self.data.extend(extra.iter().cloned());
+ }
+ }
+}
+
+forward_all_binop_to_val_ref_commutative!(impl BitXor for BigUint, bitxor);
+forward_val_assign!(impl BitXorAssign for BigUint, bitxor_assign);
+
+impl<'a> BitXor<&'a BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn bitxor(mut self, other: &BigUint) -> BigUint {
+ self ^= other;
+ self
+ }
+}
+impl<'a> BitXorAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn bitxor_assign(&mut self, other: &BigUint) {
+ for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
+ *ai ^= bi;
+ }
+ if other.data.len() > self.data.len() {
+ let extra = &other.data[self.data.len()..];
+ self.data.extend(extra.iter().cloned());
+ }
+ self.normalize();
+ }
+}
diff --git a/src/biguint/convert.rs b/src/biguint/convert.rs
new file mode 100644
index 0000000..278ec78
--- /dev/null
+++ b/src/biguint/convert.rs
@@ -0,0 +1,756 @@
+use super::{biguint_from_vec, BigUint, ToBigUint};
+
+use super::addition::add2;
+use super::division::div_rem_digit;
+use super::multiplication::mac_with_carry;
+
+use crate::big_digit::{self, BigDigit};
+use crate::std_alloc::Vec;
+use crate::ParseBigIntError;
+#[cfg(has_try_from)]
+use crate::TryFromBigIntError;
+
+use core::cmp::Ordering::{Equal, Greater, Less};
+#[cfg(has_try_from)]
+use core::convert::TryFrom;
+use core::mem;
+use core::str::FromStr;
+use num_integer::Integer;
+use num_traits::float::FloatCore;
+use num_traits::{FromPrimitive, Num, PrimInt, ToPrimitive, Zero};
+
+/// Find last set bit
+/// fls(0) == 0, fls(u32::MAX) == 32
+fn fls<T: PrimInt>(v: T) -> u8 {
+ mem::size_of::<T>() as u8 * 8 - v.leading_zeros() as u8
+}
+
+fn ilog2<T: PrimInt>(v: T) -> u8 {
+ fls(v) - 1
+}
+
+impl FromStr for BigUint {
+ type Err = ParseBigIntError;
+
+ #[inline]
+ fn from_str(s: &str) -> Result<BigUint, ParseBigIntError> {
+ BigUint::from_str_radix(s, 10)
+ }
+}
+
+// Convert from a power of two radix (bits == ilog2(radix)) where bits evenly divides
+// BigDigit::BITS
+pub(super) fn from_bitwise_digits_le(v: &[u8], bits: u8) -> BigUint {
+ debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits == 0);
+ debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits)));
+
+ let digits_per_big_digit = big_digit::BITS / bits;
+
+ let data = v
+ .chunks(digits_per_big_digit.into())
+ .map(|chunk| {
+ chunk
+ .iter()
+ .rev()
+ .fold(0, |acc, &c| (acc << bits) | BigDigit::from(c))
+ })
+ .collect();
+
+ biguint_from_vec(data)
+}
+
+// Convert from a power of two radix (bits == ilog2(radix)) where bits doesn't evenly divide
+// BigDigit::BITS
+fn from_inexact_bitwise_digits_le(v: &[u8], bits: u8) -> BigUint {
+ debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits != 0);
+ debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits)));
+
+ let big_digits = (v.len() as u64)
+ .saturating_mul(bits.into())
+ .div_ceil(&big_digit::BITS.into())
+ .to_usize()
+ .unwrap_or(core::usize::MAX);
+ let mut data = Vec::with_capacity(big_digits);
+
+ let mut d = 0;
+ let mut dbits = 0; // number of bits we currently have in d
+
+ // walk v accumulating bits in d; whenever we accumulate big_digit::BITS in d, spit out a
+ // big_digit:
+ for &c in v {
+ d |= BigDigit::from(c) << dbits;
+ dbits += bits;
+
+ if dbits >= big_digit::BITS {
+ data.push(d);
+ dbits -= big_digit::BITS;
+ // if dbits was > big_digit::BITS, we dropped some of the bits in c (they couldn't fit
+ // in d) - grab the bits we lost here:
+ d = BigDigit::from(c) >> (bits - dbits);
+ }
+ }
+
+ if dbits > 0 {
+ debug_assert!(dbits < big_digit::BITS);
+ data.push(d as BigDigit);
+ }
+
+ biguint_from_vec(data)
+}
+
+// Read big-endian radix digits (radix is not a power of two)
+fn from_radix_digits_be(v: &[u8], radix: u32) -> BigUint {
+ debug_assert!(!v.is_empty() && !radix.is_power_of_two());
+ debug_assert!(v.iter().all(|&c| u32::from(c) < radix));
+
+ #[cfg(feature = "std")]
+ let radix_log2 = f64::from(radix).log2();
+ #[cfg(not(feature = "std"))]
+ let radix_log2 = ilog2(radix.next_power_of_two()) as f64;
+
+ // Estimate how big the result will be, so we can pre-allocate it.
+ let bits = radix_log2 * v.len() as f64;
+ let big_digits = (bits / big_digit::BITS as f64).ceil();
+ let mut data = Vec::with_capacity(big_digits.to_usize().unwrap_or(0));
+
+ let (base, power) = get_radix_base(radix, big_digit::BITS);
+ let radix = radix as BigDigit;
+
+ let r = v.len() % power;
+ let i = if r == 0 { power } else { r };
+ let (head, tail) = v.split_at(i);
+
+ let first = head
+ .iter()
+ .fold(0, |acc, &d| acc * radix + BigDigit::from(d));
+ data.push(first);
+
+ debug_assert!(tail.len() % power == 0);
+ for chunk in tail.chunks(power) {
+ if data.last() != Some(&0) {
+ data.push(0);
+ }
+
+ let mut carry = 0;
+ for d in data.iter_mut() {
+ *d = mac_with_carry(0, *d, base, &mut carry);
+ }
+ debug_assert!(carry == 0);
+
+ let n = chunk
+ .iter()
+ .fold(0, |acc, &d| acc * radix + BigDigit::from(d));
+ add2(&mut data, &[n]);
+ }
+
+ biguint_from_vec(data)
+}
+
+pub(super) fn from_radix_be(buf: &[u8], radix: u32) -> Option<BigUint> {
+ assert!(
+ 2 <= radix && radix <= 256,
+ "The radix must be within 2...256"
+ );
+
+ if buf.is_empty() {
+ return Some(Zero::zero());
+ }
+
+ if radix != 256 && buf.iter().any(|&b| b >= radix as u8) {
+ return None;
+ }
+
+ let res = if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of multiplication
+ let bits = ilog2(radix);
+ let mut v = Vec::from(buf);
+ v.reverse();
+ if big_digit::BITS % bits == 0 {
+ from_bitwise_digits_le(&v, bits)
+ } else {
+ from_inexact_bitwise_digits_le(&v, bits)
+ }
+ } else {
+ from_radix_digits_be(buf, radix)
+ };
+
+ Some(res)
+}
+
+pub(super) fn from_radix_le(buf: &[u8], radix: u32) -> Option<BigUint> {
+ assert!(
+ 2 <= radix && radix <= 256,
+ "The radix must be within 2...256"
+ );
+
+ if buf.is_empty() {
+ return Some(Zero::zero());
+ }
+
+ if radix != 256 && buf.iter().any(|&b| b >= radix as u8) {
+ return None;
+ }
+
+ let res = if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of multiplication
+ let bits = ilog2(radix);
+ if big_digit::BITS % bits == 0 {
+ from_bitwise_digits_le(buf, bits)
+ } else {
+ from_inexact_bitwise_digits_le(buf, bits)
+ }
+ } else {
+ let mut v = Vec::from(buf);
+ v.reverse();
+ from_radix_digits_be(&v, radix)
+ };
+
+ Some(res)
+}
+
+impl Num for BigUint {
+ type FromStrRadixErr = ParseBigIntError;
+
+ /// Creates and initializes a `BigUint`.
+ fn from_str_radix(s: &str, radix: u32) -> Result<BigUint, ParseBigIntError> {
+ assert!(2 <= radix && radix <= 36, "The radix must be within 2...36");
+ let mut s = s;
+ if s.starts_with('+') {
+ let tail = &s[1..];
+ if !tail.starts_with('+') {
+ s = tail
+ }
+ }
+
+ if s.is_empty() {
+ return Err(ParseBigIntError::empty());
+ }
+
+ if s.starts_with('_') {
+ // Must lead with a real digit!
+ return Err(ParseBigIntError::invalid());
+ }
+
+ // First normalize all characters to plain digit values
+ let mut v = Vec::with_capacity(s.len());
+ for b in s.bytes() {
+ let d = match b {
+ b'0'..=b'9' => b - b'0',
+ b'a'..=b'z' => b - b'a' + 10,
+ b'A'..=b'Z' => b - b'A' + 10,
+ b'_' => continue,
+ _ => core::u8::MAX,
+ };
+ if d < radix as u8 {
+ v.push(d);
+ } else {
+ return Err(ParseBigIntError::invalid());
+ }
+ }
+
+ let res = if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of multiplication
+ let bits = ilog2(radix);
+ v.reverse();
+ if big_digit::BITS % bits == 0 {
+ from_bitwise_digits_le(&v, bits)
+ } else {
+ from_inexact_bitwise_digits_le(&v, bits)
+ }
+ } else {
+ from_radix_digits_be(&v, radix)
+ };
+ Ok(res)
+ }
+}
+
+fn high_bits_to_u64(v: &BigUint) -> u64 {
+ match v.data.len() {
+ 0 => 0,
+ 1 => {
+ // XXX Conversion is useless if already 64-bit.
+ #[allow(clippy::useless_conversion)]
+ let v0 = u64::from(v.data[0]);
+ v0
+ }
+ _ => {
+ let mut bits = v.bits();
+ let mut ret = 0u64;
+ let mut ret_bits = 0;
+
+ for d in v.data.iter().rev() {
+ let digit_bits = (bits - 1) % u64::from(big_digit::BITS) + 1;
+ let bits_want = Ord::min(64 - ret_bits, digit_bits);
+
+ if bits_want != 64 {
+ ret <<= bits_want;
+ }
+ // XXX Conversion is useless if already 64-bit.
+ #[allow(clippy::useless_conversion)]
+ let d0 = u64::from(*d) >> (digit_bits - bits_want);
+ ret |= d0;
+ ret_bits += bits_want;
+ bits -= bits_want;
+
+ if ret_bits == 64 {
+ break;
+ }
+ }
+
+ ret
+ }
+ }
+}
+
+impl ToPrimitive for BigUint {
+ #[inline]
+ fn to_i64(&self) -> Option<i64> {
+ self.to_u64().as_ref().and_then(u64::to_i64)
+ }
+
+ #[inline]
+ fn to_i128(&self) -> Option<i128> {
+ self.to_u128().as_ref().and_then(u128::to_i128)
+ }
+
+ #[allow(clippy::useless_conversion)]
+ #[inline]
+ fn to_u64(&self) -> Option<u64> {
+ let mut ret: u64 = 0;
+ let mut bits = 0;
+
+ for i in self.data.iter() {
+ if bits >= 64 {
+ return None;
+ }
+
+ // XXX Conversion is useless if already 64-bit.
+ ret += u64::from(*i) << bits;
+ bits += big_digit::BITS;
+ }
+
+ Some(ret)
+ }
+
+ #[inline]
+ fn to_u128(&self) -> Option<u128> {
+ let mut ret: u128 = 0;
+ let mut bits = 0;
+
+ for i in self.data.iter() {
+ if bits >= 128 {
+ return None;
+ }
+
+ ret |= u128::from(*i) << bits;
+ bits += big_digit::BITS;
+ }
+
+ Some(ret)
+ }
+
+ #[inline]
+ fn to_f32(&self) -> Option<f32> {
+ let mantissa = high_bits_to_u64(self);
+ let exponent = self.bits() - u64::from(fls(mantissa));
+
+ if exponent > core::f32::MAX_EXP as u64 {
+ Some(core::f32::INFINITY)
+ } else {
+ Some((mantissa as f32) * 2.0f32.powi(exponent as i32))
+ }
+ }
+
+ #[inline]
+ fn to_f64(&self) -> Option<f64> {
+ let mantissa = high_bits_to_u64(self);
+ let exponent = self.bits() - u64::from(fls(mantissa));
+
+ if exponent > core::f64::MAX_EXP as u64 {
+ Some(core::f64::INFINITY)
+ } else {
+ Some((mantissa as f64) * 2.0f64.powi(exponent as i32))
+ }
+ }
+}
+
+macro_rules! impl_try_from_biguint {
+ ($T:ty, $to_ty:path) => {
+ #[cfg(has_try_from)]
+ impl TryFrom<&BigUint> for $T {
+ type Error = TryFromBigIntError<()>;
+
+ #[inline]
+ fn try_from(value: &BigUint) -> Result<$T, TryFromBigIntError<()>> {
+ $to_ty(value).ok_or(TryFromBigIntError::new(()))
+ }
+ }
+
+ #[cfg(has_try_from)]
+ impl TryFrom<BigUint> for $T {
+ type Error = TryFromBigIntError<BigUint>;
+
+ #[inline]
+ fn try_from(value: BigUint) -> Result<$T, TryFromBigIntError<BigUint>> {
+ <$T>::try_from(&value).map_err(|_| TryFromBigIntError::new(value))
+ }
+ }
+ };
+}
+
+impl_try_from_biguint!(u8, ToPrimitive::to_u8);
+impl_try_from_biguint!(u16, ToPrimitive::to_u16);
+impl_try_from_biguint!(u32, ToPrimitive::to_u32);
+impl_try_from_biguint!(u64, ToPrimitive::to_u64);
+impl_try_from_biguint!(usize, ToPrimitive::to_usize);
+impl_try_from_biguint!(u128, ToPrimitive::to_u128);
+
+impl_try_from_biguint!(i8, ToPrimitive::to_i8);
+impl_try_from_biguint!(i16, ToPrimitive::to_i16);
+impl_try_from_biguint!(i32, ToPrimitive::to_i32);
+impl_try_from_biguint!(i64, ToPrimitive::to_i64);
+impl_try_from_biguint!(isize, ToPrimitive::to_isize);
+impl_try_from_biguint!(i128, ToPrimitive::to_i128);
+
+impl FromPrimitive for BigUint {
+ #[inline]
+ fn from_i64(n: i64) -> Option<BigUint> {
+ if n >= 0 {
+ Some(BigUint::from(n as u64))
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn from_i128(n: i128) -> Option<BigUint> {
+ if n >= 0 {
+ Some(BigUint::from(n as u128))
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn from_u64(n: u64) -> Option<BigUint> {
+ Some(BigUint::from(n))
+ }
+
+ #[inline]
+ fn from_u128(n: u128) -> Option<BigUint> {
+ Some(BigUint::from(n))
+ }
+
+ #[inline]
+ fn from_f64(mut n: f64) -> Option<BigUint> {
+ // handle NAN, INFINITY, NEG_INFINITY
+ if !n.is_finite() {
+ return None;
+ }
+
+ // match the rounding of casting from float to int
+ n = n.trunc();
+
+ // handle 0.x, -0.x
+ if n.is_zero() {
+ return Some(BigUint::zero());
+ }
+
+ let (mantissa, exponent, sign) = FloatCore::integer_decode(n);
+
+ if sign == -1 {
+ return None;
+ }
+
+ let mut ret = BigUint::from(mantissa);
+ match exponent.cmp(&0) {
+ Greater => ret <<= exponent as usize,
+ Equal => {}
+ Less => ret >>= (-exponent) as usize,
+ }
+ Some(ret)
+ }
+}
+
+impl From<u64> for BigUint {
+ #[inline]
+ fn from(mut n: u64) -> Self {
+ let mut ret: BigUint = Zero::zero();
+
+ while n != 0 {
+ ret.data.push(n as BigDigit);
+ // don't overflow if BITS is 64:
+ n = (n >> 1) >> (big_digit::BITS - 1);
+ }
+
+ ret
+ }
+}
+
+impl From<u128> for BigUint {
+ #[inline]
+ fn from(mut n: u128) -> Self {
+ let mut ret: BigUint = Zero::zero();
+
+ while n != 0 {
+ ret.data.push(n as BigDigit);
+ n >>= big_digit::BITS;
+ }
+
+ ret
+ }
+}
+
+macro_rules! impl_biguint_from_uint {
+ ($T:ty) => {
+ impl From<$T> for BigUint {
+ #[inline]
+ fn from(n: $T) -> Self {
+ BigUint::from(n as u64)
+ }
+ }
+ };
+}
+
+impl_biguint_from_uint!(u8);
+impl_biguint_from_uint!(u16);
+impl_biguint_from_uint!(u32);
+impl_biguint_from_uint!(usize);
+
+macro_rules! impl_biguint_try_from_int {
+ ($T:ty, $from_ty:path) => {
+ #[cfg(has_try_from)]
+ impl TryFrom<$T> for BigUint {
+ type Error = TryFromBigIntError<()>;
+
+ #[inline]
+ fn try_from(value: $T) -> Result<BigUint, TryFromBigIntError<()>> {
+ $from_ty(value).ok_or(TryFromBigIntError::new(()))
+ }
+ }
+ };
+}
+
+impl_biguint_try_from_int!(i8, FromPrimitive::from_i8);
+impl_biguint_try_from_int!(i16, FromPrimitive::from_i16);
+impl_biguint_try_from_int!(i32, FromPrimitive::from_i32);
+impl_biguint_try_from_int!(i64, FromPrimitive::from_i64);
+impl_biguint_try_from_int!(isize, FromPrimitive::from_isize);
+impl_biguint_try_from_int!(i128, FromPrimitive::from_i128);
+
+impl ToBigUint for BigUint {
+ #[inline]
+ fn to_biguint(&self) -> Option<BigUint> {
+ Some(self.clone())
+ }
+}
+
+macro_rules! impl_to_biguint {
+ ($T:ty, $from_ty:path) => {
+ impl ToBigUint for $T {
+ #[inline]
+ fn to_biguint(&self) -> Option<BigUint> {
+ $from_ty(*self)
+ }
+ }
+ };
+}
+
+impl_to_biguint!(isize, FromPrimitive::from_isize);
+impl_to_biguint!(i8, FromPrimitive::from_i8);
+impl_to_biguint!(i16, FromPrimitive::from_i16);
+impl_to_biguint!(i32, FromPrimitive::from_i32);
+impl_to_biguint!(i64, FromPrimitive::from_i64);
+impl_to_biguint!(i128, FromPrimitive::from_i128);
+
+impl_to_biguint!(usize, FromPrimitive::from_usize);
+impl_to_biguint!(u8, FromPrimitive::from_u8);
+impl_to_biguint!(u16, FromPrimitive::from_u16);
+impl_to_biguint!(u32, FromPrimitive::from_u32);
+impl_to_biguint!(u64, FromPrimitive::from_u64);
+impl_to_biguint!(u128, FromPrimitive::from_u128);
+
+impl_to_biguint!(f32, FromPrimitive::from_f32);
+impl_to_biguint!(f64, FromPrimitive::from_f64);
+
+// Extract bitwise digits that evenly divide BigDigit
+pub(super) fn to_bitwise_digits_le(u: &BigUint, bits: u8) -> Vec<u8> {
+ debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits == 0);
+
+ let last_i = u.data.len() - 1;
+ let mask: BigDigit = (1 << bits) - 1;
+ let digits_per_big_digit = big_digit::BITS / bits;
+ let digits = u
+ .bits()
+ .div_ceil(&u64::from(bits))
+ .to_usize()
+ .unwrap_or(core::usize::MAX);
+ let mut res = Vec::with_capacity(digits);
+
+ for mut r in u.data[..last_i].iter().cloned() {
+ for _ in 0..digits_per_big_digit {
+ res.push((r & mask) as u8);
+ r >>= bits;
+ }
+ }
+
+ let mut r = u.data[last_i];
+ while r != 0 {
+ res.push((r & mask) as u8);
+ r >>= bits;
+ }
+
+ res
+}
+
+// Extract bitwise digits that don't evenly divide BigDigit
+fn to_inexact_bitwise_digits_le(u: &BigUint, bits: u8) -> Vec<u8> {
+ debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits != 0);
+
+ let mask: BigDigit = (1 << bits) - 1;
+ let digits = u
+ .bits()
+ .div_ceil(&u64::from(bits))
+ .to_usize()
+ .unwrap_or(core::usize::MAX);
+ let mut res = Vec::with_capacity(digits);
+
+ let mut r = 0;
+ let mut rbits = 0;
+
+ for c in &u.data {
+ r |= *c << rbits;
+ rbits += big_digit::BITS;
+
+ while rbits >= bits {
+ res.push((r & mask) as u8);
+ r >>= bits;
+
+ // r had more bits than it could fit - grab the bits we lost
+ if rbits > big_digit::BITS {
+ r = *c >> (big_digit::BITS - (rbits - bits));
+ }
+
+ rbits -= bits;
+ }
+ }
+
+ if rbits != 0 {
+ res.push(r as u8);
+ }
+
+ while let Some(&0) = res.last() {
+ res.pop();
+ }
+
+ res
+}
+
+// Extract little-endian radix digits
+#[inline(always)] // forced inline to get const-prop for radix=10
+pub(super) fn to_radix_digits_le(u: &BigUint, radix: u32) -> Vec<u8> {
+ debug_assert!(!u.is_zero() && !radix.is_power_of_two());
+
+ #[cfg(feature = "std")]
+ let radix_log2 = f64::from(radix).log2();
+ #[cfg(not(feature = "std"))]
+ let radix_log2 = ilog2(radix) as f64;
+
+ // Estimate how big the result will be, so we can pre-allocate it.
+ let radix_digits = ((u.bits() as f64) / radix_log2).ceil();
+ let mut res = Vec::with_capacity(radix_digits.to_usize().unwrap_or(0));
+
+ let mut digits = u.clone();
+
+ let (base, power) = get_radix_base(radix, big_digit::HALF_BITS);
+ let radix = radix as BigDigit;
+
+ while digits.data.len() > 1 {
+ let (q, mut r) = div_rem_digit(digits, base);
+ for _ in 0..power {
+ res.push((r % radix) as u8);
+ r /= radix;
+ }
+ digits = q;
+ }
+
+ let mut r = digits.data[0];
+ while r != 0 {
+ res.push((r % radix) as u8);
+ r /= radix;
+ }
+
+ res
+}
+
+pub(super) fn to_radix_le(u: &BigUint, radix: u32) -> Vec<u8> {
+ if u.is_zero() {
+ vec![0]
+ } else if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of division
+ let bits = ilog2(radix);
+ if big_digit::BITS % bits == 0 {
+ to_bitwise_digits_le(u, bits)
+ } else {
+ to_inexact_bitwise_digits_le(u, bits)
+ }
+ } else if radix == 10 {
+ // 10 is so common that it's worth separating out for const-propagation.
+ // Optimizers can often turn constant division into a faster multiplication.
+ to_radix_digits_le(u, 10)
+ } else {
+ to_radix_digits_le(u, radix)
+ }
+}
+
+pub(crate) fn to_str_radix_reversed(u: &BigUint, radix: u32) -> Vec<u8> {
+ assert!(2 <= radix && radix <= 36, "The radix must be within 2...36");
+
+ if u.is_zero() {
+ return vec![b'0'];
+ }
+
+ let mut res = to_radix_le(u, radix);
+
+ // Now convert everything to ASCII digits.
+ for r in &mut res {
+ debug_assert!(u32::from(*r) < radix);
+ if *r < 10 {
+ *r += b'0';
+ } else {
+ *r += b'a' - 10;
+ }
+ }
+ res
+}
+
+/// Returns the greatest power of the radix for the given bit size
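+///
+/// For instance, with 32-bit digits and `radix = 10` this should yield
+/// `(1_000_000_000, 9)`, the largest power of ten that fits in a `u32`
+/// (a sketch of the intent; the actual values come from the table
+/// generated by `build.rs`).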
+#[inline]
+fn get_radix_base(radix: u32, bits: u8) -> (BigDigit, usize) {
+ mod gen {
+ include! { concat!(env!("OUT_DIR"), "/radix_bases.rs") }
+ }
+
+ debug_assert!(
+ 2 <= radix && radix <= 256,
+ "The radix must be within 2...256"
+ );
+ debug_assert!(!radix.is_power_of_two());
+ debug_assert!(bits <= big_digit::BITS);
+
+ match bits {
+ 16 => {
+ let (base, power) = gen::BASES_16[radix as usize];
+ (base as BigDigit, power)
+ }
+ 32 => {
+ let (base, power) = gen::BASES_32[radix as usize];
+ (base as BigDigit, power)
+ }
+ 64 => {
+ let (base, power) = gen::BASES_64[radix as usize];
+ (base as BigDigit, power)
+ }
+ _ => panic!("Invalid bigdigit size"),
+ }
+}
diff --git a/src/biguint/division.rs b/src/biguint/division.rs
new file mode 100644
index 0000000..030b185
--- /dev/null
+++ b/src/biguint/division.rs
@@ -0,0 +1,615 @@
+use super::addition::__add2;
+#[cfg(not(u64_digit))]
+use super::u32_to_u128;
+use super::BigUint;
+
+use crate::big_digit::{self, BigDigit, DoubleBigDigit};
+use crate::UsizePromotion;
+
+use core::cmp::Ordering::{Equal, Greater, Less};
+use core::mem;
+use core::ops::{Div, DivAssign, Rem, RemAssign};
+use num_integer::Integer;
+use num_traits::{CheckedDiv, One, ToPrimitive, Zero};
+
+/// Divide a two-digit numerator by a one-digit divisor, returning the quotient and remainder.
+///
+/// Note: the caller must ensure that both the quotient and remainder will fit into a single digit.
+/// This is _not_ true for an arbitrary numerator/denominator.
+///
+/// (This function also matches what the x86 divide instruction does).
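+///
+/// As a small illustration, assuming 64-bit digits: `div_wide(1, 0, 2)`
+/// divides `1 << 64` by `2`, yielding quotient `1 << 63` and remainder `0`.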
+#[inline]
+fn div_wide(hi: BigDigit, lo: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) {
+ debug_assert!(hi < divisor);
+
+ let lhs = big_digit::to_doublebigdigit(hi, lo);
+ let rhs = DoubleBigDigit::from(divisor);
+ ((lhs / rhs) as BigDigit, (lhs % rhs) as BigDigit)
+}
+
+/// For small divisors, we can divide without promoting to `DoubleBigDigit` by
+/// using half-size pieces of digit, like long-division.
+#[inline]
+fn div_half(rem: BigDigit, digit: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) {
+ use crate::big_digit::{HALF, HALF_BITS};
+
+ debug_assert!(rem < divisor && divisor <= HALF);
+ let (hi, rem) = ((rem << HALF_BITS) | (digit >> HALF_BITS)).div_rem(&divisor);
+ let (lo, rem) = ((rem << HALF_BITS) | (digit & HALF)).div_rem(&divisor);
+ ((hi << HALF_BITS) | lo, rem)
+}
+
+#[inline]
+pub(super) fn div_rem_digit(mut a: BigUint, b: BigDigit) -> (BigUint, BigDigit) {
+ let mut rem = 0;
+
+ if b <= big_digit::HALF {
+ for d in a.data.iter_mut().rev() {
+ let (q, r) = div_half(rem, *d, b);
+ *d = q;
+ rem = r;
+ }
+ } else {
+ for d in a.data.iter_mut().rev() {
+ let (q, r) = div_wide(rem, *d, b);
+ *d = q;
+ rem = r;
+ }
+ }
+
+ (a.normalized(), rem)
+}
+
+#[inline]
+fn rem_digit(a: &BigUint, b: BigDigit) -> BigDigit {
+ let mut rem = 0;
+
+ if b <= big_digit::HALF {
+ for &digit in a.data.iter().rev() {
+ let (_, r) = div_half(rem, digit, b);
+ rem = r;
+ }
+ } else {
+ for &digit in a.data.iter().rev() {
+ let (_, r) = div_wide(rem, digit, b);
+ rem = r;
+ }
+ }
+
+ rem
+}
+
+/// Subtract a multiple.
+/// a -= b * c
+/// Returns a borrow (if `a < b * c` then the borrow is positive).
+fn sub_mul_digit_same_len(a: &mut [BigDigit], b: &[BigDigit], c: BigDigit) -> BigDigit {
+ debug_assert!(a.len() == b.len());
+
+ // carry is between -big_digit::MAX and 0, so to avoid overflow we store
+ // offset_carry = carry + big_digit::MAX
+ let mut offset_carry = big_digit::MAX;
+
+ for (x, y) in a.iter_mut().zip(b) {
+ // We want to calculate sum = x - y * c + carry.
+ // sum >= -(big_digit::MAX * big_digit::MAX) - big_digit::MAX
+ // sum <= big_digit::MAX
+ // Offsetting sum by (big_digit::MAX << big_digit::BITS) puts it in DoubleBigDigit range.
+ let offset_sum = big_digit::to_doublebigdigit(big_digit::MAX, *x)
+ - big_digit::MAX as DoubleBigDigit
+ + offset_carry as DoubleBigDigit
+ - *y as DoubleBigDigit * c as DoubleBigDigit;
+
+ let (new_offset_carry, new_x) = big_digit::from_doublebigdigit(offset_sum);
+ offset_carry = new_offset_carry;
+ *x = new_x;
+ }
+
+ // Return the borrow.
+ big_digit::MAX - offset_carry
+}
+
+fn div_rem(mut u: BigUint, mut d: BigUint) -> (BigUint, BigUint) {
+ if d.is_zero() {
+ panic!("attempt to divide by zero")
+ }
+ if u.is_zero() {
+ return (Zero::zero(), Zero::zero());
+ }
+
+ if d.data.len() == 1 {
+ if d.data == [1] {
+ return (u, Zero::zero());
+ }
+ let (div, rem) = div_rem_digit(u, d.data[0]);
+ // reuse d
+ d.data.clear();
+ d += rem;
+ return (div, d);
+ }
+
+ // Required or the q_len calculation below can underflow:
+ match u.cmp(&d) {
+ Less => return (Zero::zero(), u),
+ Equal => {
+ u.set_one();
+ return (u, Zero::zero());
+ }
+ Greater => {} // Do nothing
+ }
+
+ // This algorithm is from Knuth, TAOCP vol 2 section 4.3, algorithm D:
+ //
+ // First, normalize the arguments so the highest bit in the highest digit of the divisor is
+ // set: the main loop uses the highest digit of the divisor for generating guesses, so we
+ // want it to be the largest number we can efficiently divide by.
+ //
+ let shift = d.data.last().unwrap().leading_zeros() as usize;
+
+ let (q, r) = if shift == 0 {
+ // no need to clone d
+ div_rem_core(u, &d)
+ } else {
+ div_rem_core(u << shift, &(d << shift))
+ };
+ // renormalize the remainder
+ (q, r >> shift)
+}
+
+pub(super) fn div_rem_ref(u: &BigUint, d: &BigUint) -> (BigUint, BigUint) {
+ if d.is_zero() {
+ panic!("attempt to divide by zero")
+ }
+ if u.is_zero() {
+ return (Zero::zero(), Zero::zero());
+ }
+
+ if d.data.len() == 1 {
+ if d.data == [1] {
+ return (u.clone(), Zero::zero());
+ }
+
+ let (div, rem) = div_rem_digit(u.clone(), d.data[0]);
+ return (div, rem.into());
+ }
+
+ // Required or the q_len calculation below can underflow:
+ match u.cmp(d) {
+ Less => return (Zero::zero(), u.clone()),
+ Equal => return (One::one(), Zero::zero()),
+ Greater => {} // Do nothing
+ }
+
+ // This algorithm is from Knuth, TAOCP vol 2 section 4.3, algorithm D:
+ //
+ // First, normalize the arguments so the highest bit in the highest digit of the divisor is
+ // set: the main loop uses the highest digit of the divisor for generating guesses, so we
+ // want it to be the largest number we can efficiently divide by.
+ //
+ let shift = d.data.last().unwrap().leading_zeros() as usize;
+
+ let (q, r) = if shift == 0 {
+ // no need to clone d
+ div_rem_core(u.clone(), d)
+ } else {
+ div_rem_core(u << shift, &(d << shift))
+ };
+ // renormalize the remainder
+ (q, r >> shift)
+}
+
+/// An implementation of the base division algorithm.
+/// Knuth, TAOCP vol 2 section 4.3.1, algorithm D, with an improvement from exercises 19-21.
+fn div_rem_core(mut a: BigUint, b: &BigUint) -> (BigUint, BigUint) {
+ debug_assert!(
+ a.data.len() >= b.data.len()
+ && b.data.len() > 1
+ && b.data.last().unwrap().leading_zeros() == 0
+ );
+
+ // The algorithm works by incrementally calculating "guesses", q0, for the next digit of the
+ // quotient. Once we have any number q0 such that (q0 << j) * b <= a, we can set
+ //
+ // q += q0 << j
+ // a -= (q0 << j) * b
+ //
+ // and then iterate until a < b. Then, (q, a) will be our desired quotient and remainder.
+ //
+ // q0, our guess, is calculated by dividing the last three digits of a by the last two digits of
+ // b - this will give us a guess that is close to the actual quotient, but is possibly greater.
+ // It can only be greater by 1 and only in rare cases, with probability at most
+ // 2^-(big_digit::BITS-1) for random a, see TAOCP 4.3.1 exercise 21.
+ //
+ // If the quotient turns out to be too large, we adjust it by 1:
+ // q -= 1 << j
+ // a += b << j
+
+ // a0 stores an additional extra most significant digit of the dividend, not stored in a.
+ let mut a0 = 0;
+
+ // [b1, b0] are the two most significant digits of the divisor. They never change.
+ let b0 = *b.data.last().unwrap();
+ let b1 = b.data[b.data.len() - 2];
+
+ let q_len = a.data.len() - b.data.len() + 1;
+ let mut q = BigUint {
+ data: vec![0; q_len],
+ };
+
+ for j in (0..q_len).rev() {
+ debug_assert!(a.data.len() == b.data.len() + j);
+
+ let a1 = *a.data.last().unwrap();
+ let a2 = a.data[a.data.len() - 2];
+
+ // The first q0 estimate is [a1,a0] / b0. It will never be too small, it may be too large
+ // by at most 2.
+ let (mut q0, mut r) = if a0 < b0 {
+ let (q0, r) = div_wide(a0, a1, b0);
+ (q0, r as DoubleBigDigit)
+ } else {
+ debug_assert!(a0 == b0);
+ // Avoid overflowing q0, we know the quotient fits in BigDigit.
+ // [a1,a0] = b0 * (1<<BITS - 1) + (a0 + a1)
+ (big_digit::MAX, a0 as DoubleBigDigit + a1 as DoubleBigDigit)
+ };
+
+ // r = [a1,a0] - q0 * b0
+ //
+ // Now we want to compute a more precise estimate [a2,a1,a0] / [b1,b0] which can only be
+ // less or equal to the current q0.
+ //
+ // q0 is too large if:
+ // [a2,a1,a0] < q0 * [b1,b0]
+ // (r << BITS) + a2 < q0 * b1
+ while r <= big_digit::MAX as DoubleBigDigit
+ && big_digit::to_doublebigdigit(r as BigDigit, a2)
+ < q0 as DoubleBigDigit * b1 as DoubleBigDigit
+ {
+ q0 -= 1;
+ r += b0 as DoubleBigDigit;
+ }
+
+ // q0 is now either the correct quotient digit, or in rare cases 1 too large.
+ // Subtract (q0 << j) from a. This may overflow, in which case we will have to correct.
+
+ let mut borrow = sub_mul_digit_same_len(&mut a.data[j..], &b.data, q0);
+ if borrow > a0 {
+ // q0 is too large. We need to add back one multiple of b.
+ q0 -= 1;
+ borrow -= __add2(&mut a.data[j..], &b.data);
+ }
+ // The top digit of a, stored in a0, has now been zeroed.
+ debug_assert!(borrow == a0);
+
+ q.data[j] = q0;
+
+ // Pop off the next top digit of a.
+ a0 = a.data.pop().unwrap();
+ }
+
+ a.data.push(a0);
+ a.normalize();
+
+ debug_assert!(a < *b);
+
+ (q.normalized(), a)
+}
+
+forward_val_ref_binop!(impl Div for BigUint, div);
+forward_ref_val_binop!(impl Div for BigUint, div);
+forward_val_assign!(impl DivAssign for BigUint, div_assign);
+
+impl Div<BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ let (q, _) = div_rem(self, other);
+ q
+ }
+}
+
+impl<'a, 'b> Div<&'b BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: &BigUint) -> BigUint {
+ let (q, _) = self.div_rem(other);
+ q
+ }
+}
+impl<'a> DivAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: &'a BigUint) {
+ *self = &*self / other;
+ }
+}
+
+promote_unsigned_scalars!(impl Div for BigUint, div);
+promote_unsigned_scalars_assign!(impl DivAssign for BigUint, div_assign);
+forward_all_scalar_binop_to_val_val!(impl Div<u32> for BigUint, div);
+forward_all_scalar_binop_to_val_val!(impl Div<u64> for BigUint, div);
+forward_all_scalar_binop_to_val_val!(impl Div<u128> for BigUint, div);
+
+impl Div<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: u32) -> BigUint {
+ let (q, _) = div_rem_digit(self, other as BigDigit);
+ q
+ }
+}
+impl DivAssign<u32> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: u32) {
+ *self = &*self / other;
+ }
+}
+
+impl Div<BigUint> for u32 {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!("attempt to divide by zero"),
+ 1 => From::from(self as BigDigit / other.data[0]),
+ _ => Zero::zero(),
+ }
+ }
+}
+
+impl Div<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: u64) -> BigUint {
+ let (q, _) = div_rem(self, From::from(other));
+ q
+ }
+}
+impl DivAssign<u64> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: u64) {
+ // a vec of size 0 does not allocate, so this is fairly cheap
+ let temp = mem::replace(self, Zero::zero());
+ *self = temp / other;
+ }
+}
+
+impl Div<BigUint> for u64 {
+ type Output = BigUint;
+
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!("attempt to divide by zero"),
+ 1 => From::from(self / u64::from(other.data[0])),
+ 2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])),
+ _ => Zero::zero(),
+ }
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!("attempt to divide by zero"),
+ 1 => From::from(self / other.data[0]),
+ _ => Zero::zero(),
+ }
+ }
+}
+
+impl Div<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: u128) -> BigUint {
+ let (q, _) = div_rem(self, From::from(other));
+ q
+ }
+}
+
+impl DivAssign<u128> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: u128) {
+ *self = &*self / other;
+ }
+}
+
+impl Div<BigUint> for u128 {
+ type Output = BigUint;
+
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!("attempt to divide by zero"),
+ 1 => From::from(self / u128::from(other.data[0])),
+ 2 => From::from(
+ self / u128::from(big_digit::to_doublebigdigit(other.data[1], other.data[0])),
+ ),
+ 3 => From::from(self / u32_to_u128(0, other.data[2], other.data[1], other.data[0])),
+ 4 => From::from(
+ self / u32_to_u128(other.data[3], other.data[2], other.data[1], other.data[0]),
+ ),
+ _ => Zero::zero(),
+ }
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!("attempt to divide by zero"),
+ 1 => From::from(self / other.data[0] as u128),
+ 2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])),
+ _ => Zero::zero(),
+ }
+ }
+}
+
+forward_val_ref_binop!(impl Rem for BigUint, rem);
+forward_ref_val_binop!(impl Rem for BigUint, rem);
+forward_val_assign!(impl RemAssign for BigUint, rem_assign);
+
+impl Rem<BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: BigUint) -> BigUint {
+ if let Some(other) = other.to_u32() {
+ &self % other
+ } else {
+ let (_, r) = div_rem(self, other);
+ r
+ }
+ }
+}
+
+impl<'a, 'b> Rem<&'b BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: &BigUint) -> BigUint {
+ if let Some(other) = other.to_u32() {
+ self % other
+ } else {
+ let (_, r) = self.div_rem(other);
+ r
+ }
+ }
+}
+impl<'a> RemAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: &BigUint) {
+ *self = &*self % other;
+ }
+}
+
+promote_unsigned_scalars!(impl Rem for BigUint, rem);
+promote_unsigned_scalars_assign!(impl RemAssign for BigUint, rem_assign);
+forward_all_scalar_binop_to_ref_val!(impl Rem<u32> for BigUint, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<u64> for BigUint, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<u128> for BigUint, rem);
+
+impl<'a> Rem<u32> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: u32) -> BigUint {
+ rem_digit(self, other as BigDigit).into()
+ }
+}
+impl RemAssign<u32> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: u32) {
+ *self = &*self % other;
+ }
+}
+
+impl<'a> Rem<&'a BigUint> for u32 {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(mut self, other: &'a BigUint) -> BigUint {
+ self %= other;
+ From::from(self)
+ }
+}
+
+macro_rules! impl_rem_assign_scalar {
+ ($scalar:ty, $to_scalar:ident) => {
+ forward_val_assign_scalar!(impl RemAssign for BigUint, $scalar, rem_assign);
+ impl<'a> RemAssign<&'a BigUint> for $scalar {
+ #[inline]
+ fn rem_assign(&mut self, other: &BigUint) {
+ *self = match other.$to_scalar() {
+ None => *self,
+ Some(0) => panic!("attempt to divide by zero"),
+ Some(v) => *self % v
+ };
+ }
+ }
+ }
+}
+
+// we can scalar %= BigUint for any scalar, including signed types
+impl_rem_assign_scalar!(u128, to_u128);
+impl_rem_assign_scalar!(usize, to_usize);
+impl_rem_assign_scalar!(u64, to_u64);
+impl_rem_assign_scalar!(u32, to_u32);
+impl_rem_assign_scalar!(u16, to_u16);
+impl_rem_assign_scalar!(u8, to_u8);
+impl_rem_assign_scalar!(i128, to_i128);
+impl_rem_assign_scalar!(isize, to_isize);
+impl_rem_assign_scalar!(i64, to_i64);
+impl_rem_assign_scalar!(i32, to_i32);
+impl_rem_assign_scalar!(i16, to_i16);
+impl_rem_assign_scalar!(i8, to_i8);
+
+impl Rem<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: u64) -> BigUint {
+ let (_, r) = div_rem(self, From::from(other));
+ r
+ }
+}
+impl RemAssign<u64> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: u64) {
+ *self = &*self % other;
+ }
+}
+
+impl Rem<BigUint> for u64 {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(mut self, other: BigUint) -> BigUint {
+ self %= other;
+ From::from(self)
+ }
+}
+
+impl Rem<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: u128) -> BigUint {
+ let (_, r) = div_rem(self, From::from(other));
+ r
+ }
+}
+
+impl RemAssign<u128> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: u128) {
+ *self = &*self % other;
+ }
+}
+
+impl Rem<BigUint> for u128 {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(mut self, other: BigUint) -> BigUint {
+ self %= other;
+ From::from(self)
+ }
+}
+
+impl CheckedDiv for BigUint {
+ #[inline]
+ fn checked_div(&self, v: &BigUint) -> Option<BigUint> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self.div(v))
+ }
+}
diff --git a/src/biguint/iter.rs b/src/biguint/iter.rs
new file mode 100644
index 0000000..5b9ceff
--- /dev/null
+++ b/src/biguint/iter.rs
@@ -0,0 +1,271 @@
+use core::iter::FusedIterator;
+
+#[cfg(not(u64_digit))]
+use super::u32_chunk_to_u64;
+
+/// An iterator of `u32` digits representation of a `BigUint` or `BigInt`,
+/// ordered least significant digit first.
+pub struct U32Digits<'a> {
+ #[cfg(u64_digit)]
+ data: &'a [u64],
+ #[cfg(u64_digit)]
+ next_is_lo: bool,
+ #[cfg(u64_digit)]
+ last_hi_is_zero: bool,
+
+ #[cfg(not(u64_digit))]
+ it: core::slice::Iter<'a, u32>,
+}
+
+#[cfg(u64_digit)]
+impl<'a> U32Digits<'a> {
+ #[inline]
+ pub(super) fn new(data: &'a [u64]) -> Self {
+ let last_hi_is_zero = data
+ .last()
+ .map(|&last| {
+ let last_hi = (last >> 32) as u32;
+ last_hi == 0
+ })
+ .unwrap_or(false);
+ U32Digits {
+ data,
+ next_is_lo: true,
+ last_hi_is_zero,
+ }
+ }
+}
+
+#[cfg(u64_digit)]
+impl Iterator for U32Digits<'_> {
+ type Item = u32;
+ #[inline]
+ fn next(&mut self) -> Option<u32> {
+ match self.data.split_first() {
+ Some((&first, data)) => {
+ let next_is_lo = self.next_is_lo;
+ self.next_is_lo = !next_is_lo;
+ if next_is_lo {
+ Some(first as u32)
+ } else {
+ self.data = data;
+ if data.is_empty() && self.last_hi_is_zero {
+ self.last_hi_is_zero = false;
+ None
+ } else {
+ Some((first >> 32) as u32)
+ }
+ }
+ }
+ None => None,
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+
+ #[inline]
+ fn last(self) -> Option<u32> {
+ self.data.last().map(|&last| {
+ if self.last_hi_is_zero {
+ last as u32
+ } else {
+ (last >> 32) as u32
+ }
+ })
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+}
+
+#[cfg(u64_digit)]
+impl ExactSizeIterator for U32Digits<'_> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.data.len() * 2 - usize::from(self.last_hi_is_zero) - usize::from(!self.next_is_lo)
+ }
+}
+
+#[cfg(not(u64_digit))]
+impl<'a> U32Digits<'a> {
+ #[inline]
+ pub(super) fn new(data: &'a [u32]) -> Self {
+ Self { it: data.iter() }
+ }
+}
+
+#[cfg(not(u64_digit))]
+impl Iterator for U32Digits<'_> {
+ type Item = u32;
+ #[inline]
+ fn next(&mut self) -> Option<u32> {
+ self.it.next().cloned()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<u32> {
+ self.it.nth(n).cloned()
+ }
+
+ #[inline]
+ fn last(self) -> Option<u32> {
+ self.it.last().cloned()
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.it.count()
+ }
+}
+
+#[cfg(not(u64_digit))]
+impl ExactSizeIterator for U32Digits<'_> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.it.len()
+ }
+}
+
+impl FusedIterator for U32Digits<'_> {}
+
+/// An iterator over the `u64` digits of a `BigUint` or `BigInt`,
+/// ordered least significant digit first.
+pub struct U64Digits<'a> {
+ #[cfg(not(u64_digit))]
+ it: core::slice::Chunks<'a, u32>,
+
+ #[cfg(u64_digit)]
+ it: core::slice::Iter<'a, u64>,
+}
+
+#[cfg(not(u64_digit))]
+impl<'a> U64Digits<'a> {
+ #[inline]
+ pub(super) fn new(data: &'a [u32]) -> Self {
+ U64Digits { it: data.chunks(2) }
+ }
+}
+
+#[cfg(not(u64_digit))]
+impl Iterator for U64Digits<'_> {
+ type Item = u64;
+ #[inline]
+ fn next(&mut self) -> Option<u64> {
+ self.it.next().map(u32_chunk_to_u64)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+
+ #[inline]
+ fn last(self) -> Option<u64> {
+ self.it.last().map(u32_chunk_to_u64)
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+}
+
+#[cfg(u64_digit)]
+impl<'a> U64Digits<'a> {
+ #[inline]
+ pub(super) fn new(data: &'a [u64]) -> Self {
+ Self { it: data.iter() }
+ }
+}
+
+#[cfg(u64_digit)]
+impl Iterator for U64Digits<'_> {
+ type Item = u64;
+ #[inline]
+ fn next(&mut self) -> Option<u64> {
+ self.it.next().cloned()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<u64> {
+ self.it.nth(n).cloned()
+ }
+
+ #[inline]
+ fn last(self) -> Option<u64> {
+ self.it.last().cloned()
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.it.count()
+ }
+}
+
+impl ExactSizeIterator for U64Digits<'_> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.it.len()
+ }
+}
+
+impl FusedIterator for U64Digits<'_> {}
+
+#[test]
+fn test_iter_u32_digits() {
+ let n = super::BigUint::from(5u8);
+ let mut it = n.iter_u32_digits();
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next(), Some(5));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+
+ let n = super::BigUint::from(112500000000u64);
+ let mut it = n.iter_u32_digits();
+ assert_eq!(it.len(), 2);
+ assert_eq!(it.next(), Some(830850304));
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next(), Some(26));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_iter_u64_digits() {
+ let n = super::BigUint::from(5u8);
+ let mut it = n.iter_u64_digits();
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next(), Some(5));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+
+ let n = super::BigUint::from(18_446_744_073_709_551_616u128);
+ let mut it = n.iter_u64_digits();
+ assert_eq!(it.len(), 2);
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next(), Some(1));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+}
diff --git a/src/biguint/monty.rs b/src/biguint/monty.rs
new file mode 100644
index 0000000..a5c79aa
--- /dev/null
+++ b/src/biguint/monty.rs
@@ -0,0 +1,225 @@
+use crate::std_alloc::Vec;
+use core::mem;
+use core::ops::Shl;
+use num_traits::{One, Zero};
+
+use crate::big_digit::{self, BigDigit, DoubleBigDigit, SignedDoubleBigDigit};
+use crate::biguint::BigUint;
+
+struct MontyReducer {
+ n0inv: BigDigit,
+}
+
+// k0 = -m**-1 mod 2**BITS. Algorithm from: Dumas, J.G. "On Newton–Raphson
+// Iteration for Multiplicative Inverses Modulo Prime Powers".
+fn inv_mod_alt(b: BigDigit) -> BigDigit {
+ assert_ne!(b & 1, 0);
+
+ let mut k0 = 2 - b as SignedDoubleBigDigit;
+ let mut t = (b - 1) as SignedDoubleBigDigit;
+ let mut i = 1;
+ while i < big_digit::BITS {
+ t = t.wrapping_mul(t);
+ k0 = k0.wrapping_mul(t + 1);
+
+ i <<= 1;
+ }
+ -k0 as BigDigit
+}
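+
+// Illustrative property check (editorial annotation, not part of the upstream import):
+// for any odd `b`, the value returned above is -b**-1 mod 2**BITS, so the wrapping
+// product `b * inv_mod_alt(b)` is -1 mod 2**BITS, i.e. all one-bits.
+#[test]
+fn test_inv_mod_alt_property() {
+ for &b in &[1 as BigDigit, 3, 5, 0x1234_5677, !0] {
+ assert_eq!(b.wrapping_mul(inv_mod_alt(b)), !0);
+ }
+}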
+
+impl MontyReducer {
+ fn new(n: &BigUint) -> Self {
+ let n0inv = inv_mod_alt(n.data[0]);
+ MontyReducer { n0inv }
+ }
+}
+
+/// Computes z mod m = x * y * 2 ** (-n*_W) mod m
+/// assuming k = -1/m mod 2**_W
+/// See Gueron, "Efficient Software Implementations of Modular Exponentiation".
+/// https://eprint.iacr.org/2011/239.pdf
+/// In the terminology of that paper, this is an "Almost Montgomery Multiplication":
+/// x and y are required to satisfy 0 <= z < 2**(n*_W) and then the result
+/// z is guaranteed to satisfy 0 <= z < 2**(n*_W), but it may not be < m.
+#[allow(clippy::many_single_char_names)]
+fn montgomery(x: &BigUint, y: &BigUint, m: &BigUint, k: BigDigit, n: usize) -> BigUint {
+ // This code assumes x, y, m are all the same length, n.
+ // (required by addMulVVW and the for loop).
+ // It also assumes that x, y are already reduced mod m,
+ // or else the result will not be properly reduced.
+ assert!(
+ x.data.len() == n && y.data.len() == n && m.data.len() == n,
+ "{:?} {:?} {:?} {}",
+ x,
+ y,
+ m,
+ n
+ );
+
+ let mut z = BigUint::zero();
+ z.data.resize(n * 2, 0);
+
+ let mut c: BigDigit = 0;
+ for i in 0..n {
+ let c2 = add_mul_vvw(&mut z.data[i..n + i], &x.data, y.data[i]);
+ let t = z.data[i].wrapping_mul(k);
+ let c3 = add_mul_vvw(&mut z.data[i..n + i], &m.data, t);
+ let cx = c.wrapping_add(c2);
+ let cy = cx.wrapping_add(c3);
+ z.data[n + i] = cy;
+ if cx < c2 || cy < c3 {
+ c = 1;
+ } else {
+ c = 0;
+ }
+ }
+
+ if c == 0 {
+ z.data = z.data[n..].to_vec();
+ } else {
+ {
+ let (mut first, second) = z.data.split_at_mut(n);
+ sub_vv(&mut first, &second, &m.data);
+ }
+ z.data = z.data[..n].to_vec();
+ }
+
+ z
+}
+
+#[inline(always)]
+fn add_mul_vvw(z: &mut [BigDigit], x: &[BigDigit], y: BigDigit) -> BigDigit {
+ let mut c = 0;
+ for (zi, xi) in z.iter_mut().zip(x.iter()) {
+ let (z1, z0) = mul_add_www(*xi, y, *zi);
+ let (c_, zi_) = add_ww(z0, c, 0);
+ *zi = zi_;
+ c = c_ + z1;
+ }
+
+ c
+}
+
+/// The resulting carry c is either 0 or 1.
+#[inline(always)]
+fn sub_vv(z: &mut [BigDigit], x: &[BigDigit], y: &[BigDigit]) -> BigDigit {
+ let mut c = 0;
+ for (i, (xi, yi)) in x.iter().zip(y.iter()).enumerate().take(z.len()) {
+ let zi = xi.wrapping_sub(*yi).wrapping_sub(c);
+ z[i] = zi;
+ // see "Hacker's Delight", section 2-12 (overflow detection)
+ c = ((yi & !xi) | ((yi | !xi) & zi)) >> (big_digit::BITS - 1)
+ }
+
+ c
+}
+
+/// z1<<_W + z0 = x+y+c, with c == 0 or 1
+#[inline(always)]
+fn add_ww(x: BigDigit, y: BigDigit, c: BigDigit) -> (BigDigit, BigDigit) {
+ let yc = y.wrapping_add(c);
+ let z0 = x.wrapping_add(yc);
+ let z1 = if z0 < x || yc < y { 1 } else { 0 };
+
+ (z1, z0)
+}
+
+/// z1 << _W + z0 = x * y + c
+#[inline(always)]
+fn mul_add_www(x: BigDigit, y: BigDigit, c: BigDigit) -> (BigDigit, BigDigit) {
+ let z = x as DoubleBigDigit * y as DoubleBigDigit + c as DoubleBigDigit;
+ ((z >> big_digit::BITS) as BigDigit, z as BigDigit)
+}
+
+/// Calculates x ** y mod m using a fixed, 4-bit window.
+#[allow(clippy::many_single_char_names)]
+pub(super) fn monty_modpow(x: &BigUint, y: &BigUint, m: &BigUint) -> BigUint {
+ assert!(m.data[0] & 1 == 1);
+ let mr = MontyReducer::new(m);
+ let num_words = m.data.len();
+
+ let mut x = x.clone();
+
+ // We want the lengths of x and m to be equal.
+ // It is OK if x >= m as long as len(x) == len(m).
+ if x.data.len() > num_words {
+ x %= m;
+ // Note: now x.data.len() <= num_words, but it is not guaranteed to be equal.
+ }
+ if x.data.len() < num_words {
+ x.data.resize(num_words, 0);
+ }
+
+ // rr = 2**(2*_W*len(m)) mod m
+ let mut rr = BigUint::one();
+ rr = (rr.shl(2 * num_words as u64 * u64::from(big_digit::BITS))) % m;
+ if rr.data.len() < num_words {
+ rr.data.resize(num_words, 0);
+ }
+ // one = 1, with equal length to that of m
+ let mut one = BigUint::one();
+ one.data.resize(num_words, 0);
+
+ let n = 4;
+ // powers[i] contains x^i in Montgomery form
+ let mut powers = Vec::with_capacity(1 << n);
+ powers.push(montgomery(&one, &rr, m, mr.n0inv, num_words));
+ powers.push(montgomery(&x, &rr, m, mr.n0inv, num_words));
+ for i in 2..1 << n {
+ let r = montgomery(&powers[i - 1], &powers[1], m, mr.n0inv, num_words);
+ powers.push(r);
+ }
+
+ // initialize z = 1 (Montgomery 1)
+ let mut z = powers[0].clone();
+ z.data.resize(num_words, 0);
+ let mut zz = BigUint::zero();
+ zz.data.resize(num_words, 0);
+
+ // windowed exponentiation over the exponent's digits, using Montgomery multiplications
+ for i in (0..y.data.len()).rev() {
+ let mut yi = y.data[i];
+ let mut j = 0;
+ while j < big_digit::BITS {
+ if i != y.data.len() - 1 || j != 0 {
+ zz = montgomery(&z, &z, m, mr.n0inv, num_words);
+ z = montgomery(&zz, &zz, m, mr.n0inv, num_words);
+ zz = montgomery(&z, &z, m, mr.n0inv, num_words);
+ z = montgomery(&zz, &zz, m, mr.n0inv, num_words);
+ }
+ zz = montgomery(
+ &z,
+ &powers[(yi >> (big_digit::BITS - n)) as usize],
+ m,
+ mr.n0inv,
+ num_words,
+ );
+ mem::swap(&mut z, &mut zz);
+ yi <<= n;
+ j += n;
+ }
+ }
+
+ // convert to regular number
+ zz = montgomery(&z, &one, m, mr.n0inv, num_words);
+
+ zz.normalize();
+ // One last reduction, just in case.
+ // See golang.org/issue/13907.
+ if zz >= *m {
+ // Common case is m has high bit set; in that case,
+ // since zz is the same length as m, there can be just
+ // one multiple of m to remove. Just subtract.
+ // We think that the subtract should be sufficient in general,
+ // so do that unconditionally, but double-check,
+ // in case our beliefs are wrong.
+ // The div is not expected to be reached.
+ zz -= m;
+ if zz >= *m {
+ zz %= m;
+ }
+ }
+
+ zz.normalize();
+ zz
+}
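+
+// Illustrative check (editorial annotation, not part of the upstream import): for a
+// small odd modulus, the Montgomery path agrees with ordinary modular exponentiation.
+#[test]
+fn test_monty_modpow_small() {
+ let x = BigUint::from(3u32);
+ let y = BigUint::from(20u32);
+ let m = BigUint::from(65537u32);
+ // 3^20 = 3486784401; reduce it mod 65537 with plain integer arithmetic.
+ assert_eq!(monty_modpow(&x, &y, &m), BigUint::from(3486784401u64 % 65537));
+}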
diff --git a/src/biguint/multiplication.rs b/src/biguint/multiplication.rs
new file mode 100644
index 0000000..aaa6934
--- /dev/null
+++ b/src/biguint/multiplication.rs
@@ -0,0 +1,507 @@
+use super::addition::{__add2, add2};
+use super::subtraction::sub2;
+#[cfg(not(u64_digit))]
+use super::u32_from_u128;
+use super::{biguint_from_vec, cmp_slice, BigUint};
+
+use crate::big_digit::{self, BigDigit, DoubleBigDigit};
+use crate::Sign::{self, Minus, NoSign, Plus};
+use crate::{BigInt, UsizePromotion};
+
+use core::cmp::Ordering;
+use core::iter::Product;
+use core::ops::{Mul, MulAssign};
+use num_traits::{CheckedMul, One, Zero};
+
+#[inline]
+pub(super) fn mac_with_carry(
+ a: BigDigit,
+ b: BigDigit,
+ c: BigDigit,
+ acc: &mut DoubleBigDigit,
+) -> BigDigit {
+ *acc += DoubleBigDigit::from(a);
+ *acc += DoubleBigDigit::from(b) * DoubleBigDigit::from(c);
+ let lo = *acc as BigDigit;
+ *acc >>= big_digit::BITS;
+ lo
+}
+
+#[inline]
+fn mul_with_carry(a: BigDigit, b: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit {
+ *acc += DoubleBigDigit::from(a) * DoubleBigDigit::from(b);
+ let lo = *acc as BigDigit;
+ *acc >>= big_digit::BITS;
+ lo
+}
+
+/// Three argument multiply accumulate:
+/// acc += b * c
+fn mac_digit(acc: &mut [BigDigit], b: &[BigDigit], c: BigDigit) {
+ if c == 0 {
+ return;
+ }
+
+ let mut carry = 0;
+ let (a_lo, a_hi) = acc.split_at_mut(b.len());
+
+ for (a, &b) in a_lo.iter_mut().zip(b) {
+ *a = mac_with_carry(*a, b, c, &mut carry);
+ }
+
+ let (carry_hi, carry_lo) = big_digit::from_doublebigdigit(carry);
+
+ let final_carry = if carry_hi == 0 {
+ __add2(a_hi, &[carry_lo])
+ } else {
+ __add2(a_hi, &[carry_hi, carry_lo])
+ };
+ assert_eq!(final_carry, 0, "carry overflow during multiplication!");
+}
+
+fn bigint_from_slice(slice: &[BigDigit]) -> BigInt {
+ BigInt::from(biguint_from_vec(slice.to_vec()))
+}
+
+/// Three argument multiply accumulate:
+/// acc += b * c
+#[allow(clippy::many_single_char_names)]
+fn mac3(acc: &mut [BigDigit], b: &[BigDigit], c: &[BigDigit]) {
+ let (x, y) = if b.len() < c.len() { (b, c) } else { (c, b) };
+
+ // We use three algorithms for different input sizes.
+ //
+ // - For small inputs, long multiplication is fastest.
+ // - Next we use Karatsuba multiplication (Toom-2), which we have optimized
+ // to avoid unnecessary allocations for intermediate values.
+ // - For the largest inputs we use Toom-3, which better optimizes the
+ // number of operations, but uses more temporary allocations.
+ //
+ // The thresholds are somewhat arbitrary, chosen by evaluating the results
+ // of `cargo bench --bench bigint multiply`.
+
+ if x.len() <= 32 {
+ // Long multiplication:
+ for (i, xi) in x.iter().enumerate() {
+ mac_digit(&mut acc[i..], y, *xi);
+ }
+ } else if x.len() <= 256 {
+ // Karatsuba multiplication:
+ //
+ // The idea is that we break x and y up into two smaller numbers that each have about half
+ // as many digits, like so (note that multiplying by b is just a shift):
+ //
+ // x = x0 + x1 * b
+ // y = y0 + y1 * b
+ //
+ // With some algebra, we can compute x * y with three smaller products, where the inputs to
+ // each of the smaller products have only about half as many digits as x and y:
+ //
+ // x * y = (x0 + x1 * b) * (y0 + y1 * b)
+ //
+ // x * y = x0 * y0
+ // + x0 * y1 * b
+ // + x1 * y0 * b
+ // + x1 * y1 * b^2
+ //
+ // Let p0 = x0 * y0 and p2 = x1 * y1:
+ //
+ // x * y = p0
+ // + (x0 * y1 + x1 * y0) * b
+ // + p2 * b^2
+ //
+ // The real trick is that middle term:
+ //
+ // x0 * y1 + x1 * y0
+ //
+ // = x0 * y1 + x1 * y0 - p0 + p0 - p2 + p2
+ //
+ // = x0 * y1 + x1 * y0 - x0 * y0 - x1 * y1 + p0 + p2
+ //
+ // Now we complete the square:
+ //
+ // = -(x0 * y0 - x0 * y1 - x1 * y0 + x1 * y1) + p0 + p2
+ //
+ // = -((x1 - x0) * (y1 - y0)) + p0 + p2
+ //
+ // Let p1 = (x1 - x0) * (y1 - y0), and substitute back into our original formula:
+ //
+ // x * y = p0
+ // + (p0 + p2 - p1) * b
+ // + p2 * b^2
+ //
+ // Where the three intermediate products are:
+ //
+ // p0 = x0 * y0
+ // p1 = (x1 - x0) * (y1 - y0)
+ // p2 = x1 * y1
+ //
+ // In doing the computation, we take great care to avoid unnecessary temporary variables
+ // (since creating a BigUint requires a heap allocation): thus, we rearrange the formula a
+ // bit so we can use the same temporary variable for all the intermediate products:
+ //
+ // x * y = p2 * b^2 + p2 * b
+ // + p0 * b + p0
+ // - p1 * b
+ //
+ // The other trick we use is instead of doing explicit shifts, we slice acc at the
+ // appropriate offset when doing the add.
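+ //
+ // A tiny numeric check of the formula above (editorial annotation): with base
+ // b = 10^2, x = 1234 and y = 5678 split as x0 = 34, x1 = 12, y0 = 78, y1 = 56:
+ //
+ // p0 = 34 * 78 = 2652
+ // p2 = 12 * 56 = 672
+ // p1 = (12 - 34) * (56 - 78) = 484
+ //
+ // x * y = 2652 + (2652 + 672 - 484) * 10^2 + 672 * 10^4 = 7006652 = 1234 * 5678.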
+
+ // When x is smaller than y, it's significantly faster to pick b such that x is split in
+ // half, not y:
+ let b = x.len() / 2;
+ let (x0, x1) = x.split_at(b);
+ let (y0, y1) = y.split_at(b);
+
+ // We reuse the same BigUint for all the intermediate multiplies and have to size p
+ // appropriately here: x1.len() >= x0.len() and y1.len() >= y0.len():
+ let len = x1.len() + y1.len() + 1;
+ let mut p = BigUint { data: vec![0; len] };
+
+ // p2 = x1 * y1
+ mac3(&mut p.data[..], x1, y1);
+
+ // Not required, but the adds go faster if we drop any unneeded 0s from the end:
+ p.normalize();
+
+ add2(&mut acc[b..], &p.data[..]);
+ add2(&mut acc[b * 2..], &p.data[..]);
+
+ // Zero out p before the next multiply:
+ p.data.truncate(0);
+ p.data.resize(len, 0);
+
+ // p0 = x0 * y0
+ mac3(&mut p.data[..], x0, y0);
+ p.normalize();
+
+ add2(&mut acc[..], &p.data[..]);
+ add2(&mut acc[b..], &p.data[..]);
+
+ // p1 = (x1 - x0) * (y1 - y0)
+ // We do this one last, since it may be negative and acc can't ever be negative:
+ let (j0_sign, j0) = sub_sign(x1, x0);
+ let (j1_sign, j1) = sub_sign(y1, y0);
+
+ match j0_sign * j1_sign {
+ Plus => {
+ p.data.truncate(0);
+ p.data.resize(len, 0);
+
+ mac3(&mut p.data[..], &j0.data[..], &j1.data[..]);
+ p.normalize();
+
+ sub2(&mut acc[b..], &p.data[..]);
+ }
+ Minus => {
+ mac3(&mut acc[b..], &j0.data[..], &j1.data[..]);
+ }
+ NoSign => (),
+ }
+ } else {
+ // Toom-3 multiplication:
+ //
+ // Toom-3 is like Karatsuba above, but dividing the inputs into three parts.
+ // Both are instances of Toom-Cook, using `k=3` and `k=2` respectively.
+ //
+ // The general idea is to treat the large integers digits as
+ // polynomials of a certain degree and determine the coefficients/digits
+ // of the product of the two via interpolation of the polynomial product.
+ let i = y.len() / 3 + 1;
+
+ let x0_len = Ord::min(x.len(), i);
+ let x1_len = Ord::min(x.len() - x0_len, i);
+
+ let y0_len = i;
+ let y1_len = Ord::min(y.len() - y0_len, i);
+
+ // Break x and y into three parts, representing an order-two polynomial.
+ // t is chosen to be the size of a digit so we can use faster shifts
+ // in place of multiplications.
+ //
+ // x(t) = x2*t^2 + x1*t + x0
+ let x0 = bigint_from_slice(&x[..x0_len]);
+ let x1 = bigint_from_slice(&x[x0_len..x0_len + x1_len]);
+ let x2 = bigint_from_slice(&x[x0_len + x1_len..]);
+
+ // y(t) = y2*t^2 + y1*t + y0
+ let y0 = bigint_from_slice(&y[..y0_len]);
+ let y1 = bigint_from_slice(&y[y0_len..y0_len + y1_len]);
+ let y2 = bigint_from_slice(&y[y0_len + y1_len..]);
+
+ // Let w(t) = x(t) * y(t)
+ //
+ // This gives us the following order-4 polynomial.
+ //
+ // w(t) = w4*t^4 + w3*t^3 + w2*t^2 + w1*t + w0
+ //
+ // We need to find the coefficients w4, w3, w2, w1 and w0. Instead
+ // of simply multiplying the x and y in total, we can evaluate w
+ // at 5 points. An n-degree polynomial is uniquely identified by (n + 1)
+ // points.
+ //
+ // It is arbitrary as to what points we evaluate w at but we use the
+ // following.
+ //
+ // w(t) at t = 0, 1, -1, -2 and inf
+ //
+ // The values for w(t) in terms of x(t)*y(t) at these points are:
+ //
+ // let a = w(0) = x0 * y0
+ // let b = w(1) = (x2 + x1 + x0) * (y2 + y1 + y0)
+ // let c = w(-1) = (x2 - x1 + x0) * (y2 - y1 + y0)
+ // let d = w(-2) = (4*x2 - 2*x1 + x0) * (4*y2 - 2*y1 + y0)
+ // let e = w(inf) = x2 * y2 as t -> inf
+
+ // x0 + x2, avoiding temporaries
+ let p = &x0 + &x2;
+
+ // y0 + y2, avoiding temporaries
+ let q = &y0 + &y2;
+
+ // x2 - x1 + x0, avoiding temporaries
+ let p2 = &p - &x1;
+
+ // y2 - y1 + y0, avoiding temporaries
+ let q2 = &q - &y1;
+
+ // w(0)
+ let r0 = &x0 * &y0;
+
+ // w(inf)
+ let r4 = &x2 * &y2;
+
+ // w(1)
+ let r1 = (p + x1) * (q + y1);
+
+ // w(-1)
+ let r2 = &p2 * &q2;
+
+ // w(-2)
+ let r3 = ((p2 + x2) * 2 - x0) * ((q2 + y2) * 2 - y0);
+
+ // Evaluating these points gives us the following system of linear equations.
+ //
+ // 0 0 0 0 1 | a
+ // 1 1 1 1 1 | b
+ // 1 -1 1 -1 1 | c
+ // 16 -8 4 -2 1 | d
+ // 1 0 0 0 0 | e
+ //
+ // The solved system (after Gaussian elimination or similar)
+ // in terms of its coefficients:
+ //
+ // w0 = w(0)
+ // w1 = w(0)/2 + w(1)/3 - w(-1) + w(-2)/6 - 2*w(inf)
+ // w2 = -w(0) + w(1)/2 + w(-1)/2 - w(inf)
+ // w3 = -w(0)/2 + w(1)/6 + w(-1)/2 - w(-2)/6 + 2*w(inf)
+ // w4 = w(inf)
+ //
+ // This particular sequence is given by Bodrato and is an interpolation
+ // of the above equations.
+ let mut comp3: BigInt = (r3 - &r1) / 3;
+ let mut comp1: BigInt = (r1 - &r2) / 2;
+ let mut comp2: BigInt = r2 - &r0;
+ comp3 = (&comp2 - comp3) / 2 + &r4 * 2;
+ comp2 += &comp1 - &r4;
+ comp1 -= &comp3;
+
+ // Recomposition. The coefficients of the polynomial are now known.
+ //
+ // Evaluate at w(t) where t is our given base to get the result.
+ let bits = u64::from(big_digit::BITS) * i as u64;
+ let result = r0
+ + (comp1 << bits)
+ + (comp2 << (2 * bits))
+ + (comp3 << (3 * bits))
+ + (r4 << (4 * bits));
+ let result_pos = result.to_biguint().unwrap();
+ add2(&mut acc[..], &result_pos.data);
+ }
+}
+
+fn mul3(x: &[BigDigit], y: &[BigDigit]) -> BigUint {
+ let len = x.len() + y.len() + 1;
+ let mut prod = BigUint { data: vec![0; len] };
+
+ mac3(&mut prod.data[..], x, y);
+ prod.normalized()
+}
+
+fn scalar_mul(a: &mut [BigDigit], b: BigDigit) -> BigDigit {
+ let mut carry = 0;
+ for a in a.iter_mut() {
+ *a = mul_with_carry(*a, b, &mut carry);
+ }
+ carry as BigDigit
+}
+
+fn sub_sign(mut a: &[BigDigit], mut b: &[BigDigit]) -> (Sign, BigUint) {
+ // Normalize:
+ a = &a[..a.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)];
+ b = &b[..b.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)];
+
+ match cmp_slice(a, b) {
+ Ordering::Greater => {
+ let mut a = a.to_vec();
+ sub2(&mut a, b);
+ (Plus, biguint_from_vec(a))
+ }
+ Ordering::Less => {
+ let mut b = b.to_vec();
+ sub2(&mut b, a);
+ (Minus, biguint_from_vec(b))
+ }
+ Ordering::Equal => (NoSign, Zero::zero()),
+ }
+}
+
+forward_all_binop_to_ref_ref!(impl Mul for BigUint, mul);
+forward_val_assign!(impl MulAssign for BigUint, mul_assign);
+
+impl<'a, 'b> Mul<&'b BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(self, other: &BigUint) -> BigUint {
+ mul3(&self.data[..], &other.data[..])
+ }
+}
+impl<'a> MulAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn mul_assign(&mut self, other: &'a BigUint) {
+ *self = &*self * other
+ }
+}
+
+promote_unsigned_scalars!(impl Mul for BigUint, mul);
+promote_unsigned_scalars_assign!(impl MulAssign for BigUint, mul_assign);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u32> for BigUint, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u64> for BigUint, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u128> for BigUint, mul);
+
+impl Mul<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(mut self, other: u32) -> BigUint {
+ self *= other;
+ self
+ }
+}
+impl MulAssign<u32> for BigUint {
+ #[inline]
+ fn mul_assign(&mut self, other: u32) {
+ if other == 0 {
+ self.data.clear();
+ } else {
+ let carry = scalar_mul(&mut self.data[..], other as BigDigit);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
+
+impl Mul<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(mut self, other: u64) -> BigUint {
+ self *= other;
+ self
+ }
+}
+impl MulAssign<u64> for BigUint {
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn mul_assign(&mut self, other: u64) {
+ if other == 0 {
+ self.data.clear();
+ } else if other <= u64::from(BigDigit::max_value()) {
+ *self *= other as BigDigit
+ } else {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ *self = mul3(&self.data[..], &[lo, hi])
+ }
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn mul_assign(&mut self, other: u64) {
+ if other == 0 {
+ self.data.clear();
+ } else {
+ let carry = scalar_mul(&mut self.data[..], other as BigDigit);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
+
+impl Mul<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(mut self, other: u128) -> BigUint {
+ self *= other;
+ self
+ }
+}
+
+impl MulAssign<u128> for BigUint {
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn mul_assign(&mut self, other: u128) {
+ if other == 0 {
+ self.data.clear();
+ } else if other <= u128::from(BigDigit::max_value()) {
+ *self *= other as BigDigit
+ } else {
+ let (a, b, c, d) = u32_from_u128(other);
+ *self = mul3(&self.data[..], &[d, c, b, a])
+ }
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn mul_assign(&mut self, other: u128) {
+ if other == 0 {
+ self.data.clear();
+ } else if other <= BigDigit::max_value() as u128 {
+ *self *= other as BigDigit
+ } else {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ *self = mul3(&self.data[..], &[lo, hi])
+ }
+ }
+}
+
+impl CheckedMul for BigUint {
+ #[inline]
+ fn checked_mul(&self, v: &BigUint) -> Option<BigUint> {
+ Some(self.mul(v))
+ }
+}
+
+impl_product_iter_type!(BigUint);
+
+#[test]
+fn test_sub_sign() {
+ use crate::BigInt;
+ use num_traits::Num;
+
+ fn sub_sign_i(a: &[BigDigit], b: &[BigDigit]) -> BigInt {
+ let (sign, val) = sub_sign(a, b);
+ BigInt::from_biguint(sign, val)
+ }
+
+ let a = BigUint::from_str_radix("265252859812191058636308480000000", 10).unwrap();
+ let b = BigUint::from_str_radix("26525285981219105863630848000000", 10).unwrap();
+ let a_i = BigInt::from(a.clone());
+ let b_i = BigInt::from(b.clone());
+
+ assert_eq!(sub_sign_i(&a.data[..], &b.data[..]), &a_i - &b_i);
+ assert_eq!(sub_sign_i(&b.data[..], &a.data[..]), &b_i - &a_i);
+}
diff --git a/src/biguint/power.rs b/src/biguint/power.rs
new file mode 100644
index 0000000..44b3814
--- /dev/null
+++ b/src/biguint/power.rs
@@ -0,0 +1,257 @@
+use super::monty::monty_modpow;
+use super::BigUint;
+
+use crate::big_digit::{self, BigDigit};
+
+use num_integer::Integer;
+use num_traits::{One, Pow, ToPrimitive, Zero};
+
+impl<'b> Pow<&'b BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: &BigUint) -> BigUint {
+ if self.is_one() || exp.is_zero() {
+ BigUint::one()
+ } else if self.is_zero() {
+ BigUint::zero()
+ } else if let Some(exp) = exp.to_u64() {
+ self.pow(exp)
+ } else if let Some(exp) = exp.to_u128() {
+ self.pow(exp)
+ } else {
+ // At this point, `self >= 2` and `exp >= 2¹²⁸`. The smallest possible result,
+ // `2.pow(2¹²⁸)`, would require far more memory than 64-bit targets can address!
+ panic!("memory overflow")
+ }
+ }
+}
+
+impl Pow<BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: BigUint) -> BigUint {
+ Pow::pow(self, &exp)
+ }
+}
+
+impl<'a, 'b> Pow<&'b BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: &BigUint) -> BigUint {
+ if self.is_one() || exp.is_zero() {
+ BigUint::one()
+ } else if self.is_zero() {
+ BigUint::zero()
+ } else {
+ self.clone().pow(exp)
+ }
+ }
+}
+
+impl<'a> Pow<BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: BigUint) -> BigUint {
+ Pow::pow(self, &exp)
+ }
+}
+
+macro_rules! pow_impl {
+ ($T:ty) => {
+ impl Pow<$T> for BigUint {
+ type Output = BigUint;
+
+ fn pow(self, mut exp: $T) -> BigUint {
+ if exp == 0 {
+ return BigUint::one();
+ }
+ let mut base = self;
+
+ while exp & 1 == 0 {
+ base = &base * &base;
+ exp >>= 1;
+ }
+
+ if exp == 1 {
+ return base;
+ }
+
+ let mut acc = base.clone();
+ while exp > 1 {
+ exp >>= 1;
+ base = &base * &base;
+ if exp & 1 == 1 {
+ acc = &acc * &base;
+ }
+ }
+ acc
+ }
+ }
+
+ impl<'b> Pow<&'b $T> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: &$T) -> BigUint {
+ Pow::pow(self, *exp)
+ }
+ }
+
+ impl<'a> Pow<$T> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: $T) -> BigUint {
+ if exp == 0 {
+ return BigUint::one();
+ }
+ Pow::pow(self.clone(), exp)
+ }
+ }
+
+ impl<'a, 'b> Pow<&'b $T> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: &$T) -> BigUint {
+ Pow::pow(self, *exp)
+ }
+ }
+ };
+}
+
+pow_impl!(u8);
+pow_impl!(u16);
+pow_impl!(u32);
+pow_impl!(u64);
+pow_impl!(usize);
+pow_impl!(u128);
+
+pub(super) fn modpow(x: &BigUint, exponent: &BigUint, modulus: &BigUint) -> BigUint {
+ assert!(
+ !modulus.is_zero(),
+ "attempt to calculate with zero modulus!"
+ );
+
+ if modulus.is_odd() {
+ // For an odd modulus, we can use Montgomery multiplication in base 2^BITS (the digit size).
+ monty_modpow(x, exponent, modulus)
+ } else {
+ // Otherwise do basically the same as `num::pow`, but with a modulus.
+ plain_modpow(x, &exponent.data, modulus)
+ }
+}
+
+fn plain_modpow(base: &BigUint, exp_data: &[BigDigit], modulus: &BigUint) -> BigUint {
+ assert!(
+ !modulus.is_zero(),
+ "attempt to calculate with zero modulus!"
+ );
+
+ let i = match exp_data.iter().position(|&r| r != 0) {
+ None => return BigUint::one(),
+ Some(i) => i,
+ };
+
+ let mut base = base % modulus;
+ for _ in 0..i {
+ for _ in 0..big_digit::BITS {
+ base = &base * &base % modulus;
+ }
+ }
+
+ let mut r = exp_data[i];
+ let mut b = 0u8;
+ while r.is_even() {
+ base = &base * &base % modulus;
+ r >>= 1;
+ b += 1;
+ }
+
+ let mut exp_iter = exp_data[i + 1..].iter();
+ if exp_iter.len() == 0 && r.is_one() {
+ return base;
+ }
+
+ let mut acc = base.clone();
+ r >>= 1;
+ b += 1;
+
+ {
+ let mut unit = |exp_is_odd| {
+ base = &base * &base % modulus;
+ if exp_is_odd {
+ acc = &acc * &base % modulus;
+ }
+ };
+
+ if let Some(&last) = exp_iter.next_back() {
+ // consume exp_data[i]
+ for _ in b..big_digit::BITS {
+ unit(r.is_odd());
+ r >>= 1;
+ }
+
+ // consume all other digits before the last
+ for &r in exp_iter {
+ let mut r = r;
+ for _ in 0..big_digit::BITS {
+ unit(r.is_odd());
+ r >>= 1;
+ }
+ }
+ r = last;
+ }
+
+ debug_assert_ne!(r, 0);
+ while !r.is_zero() {
+ unit(r.is_odd());
+ r >>= 1;
+ }
+ }
+ acc
+}
+
+#[test]
+fn test_plain_modpow() {
+ let two = &BigUint::from(2u32);
+ let modulus = BigUint::from(0x1100u32);
+
+ let exp = vec![0, 0b1];
+ assert_eq!(
+ two.pow(0b1_00000000_u32) % &modulus,
+ plain_modpow(&two, &exp, &modulus)
+ );
+ let exp = vec![0, 0b10];
+ assert_eq!(
+ two.pow(0b10_00000000_u32) % &modulus,
+ plain_modpow(&two, &exp, &modulus)
+ );
+ let exp = vec![0, 0b110010];
+ assert_eq!(
+ two.pow(0b110010_00000000_u32) % &modulus,
+ plain_modpow(&two, &exp, &modulus)
+ );
+ let exp = vec![0b1, 0b1];
+ assert_eq!(
+ two.pow(0b1_00000001_u32) % &modulus,
+ plain_modpow(&two, &exp, &modulus)
+ );
+ let exp = vec![0b1100, 0, 0b1];
+ assert_eq!(
+ two.pow(0b1_00000000_00001100_u32) % &modulus,
+ plain_modpow(&two, &exp, &modulus)
+ );
+}
+
+#[test]
+fn test_pow_biguint() {
+ let base = BigUint::from(5u8);
+ let exponent = BigUint::from(3u8);
+
+ assert_eq!(BigUint::from(125u8), base.pow(exponent));
+}
diff --git a/src/biguint/serde.rs b/src/biguint/serde.rs
new file mode 100644
index 0000000..573b0a7
--- /dev/null
+++ b/src/biguint/serde.rs
@@ -0,0 +1,108 @@
+use super::{biguint_from_vec, BigUint};
+
+use crate::std_alloc::Vec;
+
+use core::fmt;
+use serde::de::{SeqAccess, Visitor};
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+impl Serialize for BigUint {
+ #[cfg(not(u64_digit))]
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ // Note: do not change the serialization format, or it may break forward
+ // and backward compatibility of serialized data! If we ever change the
+ // internal representation, we should still serialize in base-`u32`.
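+ //
+ // Illustrative example (editorial annotation): the value 2^32 + 2 is always
+ // serialized as the u32 sequence [2, 1], whether the internal digits are u32
+ // or u64; a zero high half of the most significant u64 digit is dropped, so
+ // 7u64 serializes as [7].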
+ let data: &[u32] = &self.data;
+ data.serialize(serializer)
+ }
+
+ #[cfg(u64_digit)]
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ use serde::ser::SerializeSeq;
+
+ if let Some((&last, data)) = self.data.split_last() {
+ let last_lo = last as u32;
+ let last_hi = (last >> 32) as u32;
+ let u32_len = data.len() * 2 + 1 + (last_hi != 0) as usize;
+ let mut seq = serializer.serialize_seq(Some(u32_len))?;
+ for &x in data {
+ seq.serialize_element(&(x as u32))?;
+ seq.serialize_element(&((x >> 32) as u32))?;
+ }
+ seq.serialize_element(&last_lo)?;
+ if last_hi != 0 {
+ seq.serialize_element(&last_hi)?;
+ }
+ seq.end()
+ } else {
+ let data: &[u32] = &[];
+ data.serialize(serializer)
+ }
+ }
+}
+
+impl<'de> Deserialize<'de> for BigUint {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ deserializer.deserialize_seq(U32Visitor)
+ }
+}
+
+struct U32Visitor;
+
+impl<'de> Visitor<'de> for U32Visitor {
+ type Value = BigUint;
+
+ fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter.write_str("a sequence of unsigned 32-bit numbers")
+ }
+
+ #[cfg(not(u64_digit))]
+ fn visit_seq<S>(self, mut seq: S) -> Result<Self::Value, S::Error>
+ where
+ S: SeqAccess<'de>,
+ {
+ let len = seq.size_hint().unwrap_or(0);
+ let mut data = Vec::with_capacity(len);
+
+ while let Some(value) = seq.next_element::<u32>()? {
+ data.push(value);
+ }
+
+ Ok(biguint_from_vec(data))
+ }
+
+ #[cfg(u64_digit)]
+ fn visit_seq<S>(self, mut seq: S) -> Result<Self::Value, S::Error>
+ where
+ S: SeqAccess<'de>,
+ {
+ use crate::big_digit::BigDigit;
+ use num_integer::Integer;
+
+ let u32_len = seq.size_hint().unwrap_or(0);
+ let len = u32_len.div_ceil(&2);
+ let mut data = Vec::with_capacity(len);
+
+ while let Some(lo) = seq.next_element::<u32>()? {
+ let mut value = BigDigit::from(lo);
+ if let Some(hi) = seq.next_element::<u32>()? {
+ value |= BigDigit::from(hi) << 32;
+ data.push(value);
+ } else {
+ data.push(value);
+ break;
+ }
+ }
+
+ Ok(biguint_from_vec(data))
+ }
+}
diff --git a/src/biguint/shift.rs b/src/biguint/shift.rs
new file mode 100644
index 0000000..05964d2
--- /dev/null
+++ b/src/biguint/shift.rs
@@ -0,0 +1,172 @@
+use super::{biguint_from_vec, BigUint};
+
+use crate::big_digit;
+use crate::std_alloc::{Cow, Vec};
+
+use core::mem;
+use core::ops::{Shl, ShlAssign, Shr, ShrAssign};
+use num_traits::{PrimInt, Zero};
+
+#[inline]
+fn biguint_shl<T: PrimInt>(n: Cow<'_, BigUint>, shift: T) -> BigUint {
+ if shift < T::zero() {
+ panic!("attempt to shift left with negative");
+ }
+ if n.is_zero() {
+ return n.into_owned();
+ }
+ let bits = T::from(big_digit::BITS).unwrap();
+ let digits = (shift / bits).to_usize().expect("capacity overflow");
+ let shift = (shift % bits).to_u8().unwrap();
+ biguint_shl2(n, digits, shift)
+}
+
+fn biguint_shl2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint {
+ let mut data = match digits {
+ 0 => n.into_owned().data,
+ _ => {
+ let len = digits.saturating_add(n.data.len() + 1);
+ let mut data = Vec::with_capacity(len);
+ data.resize(digits, 0);
+ data.extend(n.data.iter());
+ data
+ }
+ };
+
+ if shift > 0 {
+ let mut carry = 0;
+ let carry_shift = big_digit::BITS as u8 - shift;
+ for elem in data[digits..].iter_mut() {
+ let new_carry = *elem >> carry_shift;
+ *elem = (*elem << shift) | carry;
+ carry = new_carry;
+ }
+ if carry != 0 {
+ data.push(carry);
+ }
+ }
+
+ biguint_from_vec(data)
+}
+
+#[inline]
+fn biguint_shr<T: PrimInt>(n: Cow<'_, BigUint>, shift: T) -> BigUint {
+ if shift < T::zero() {
+ panic!("attempt to shift right with negative");
+ }
+ if n.is_zero() {
+ return n.into_owned();
+ }
+ let bits = T::from(big_digit::BITS).unwrap();
+ let digits = (shift / bits).to_usize().unwrap_or(core::usize::MAX);
+ let shift = (shift % bits).to_u8().unwrap();
+ biguint_shr2(n, digits, shift)
+}
+
+fn biguint_shr2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint {
+ if digits >= n.data.len() {
+ let mut n = n.into_owned();
+ n.set_zero();
+ return n;
+ }
+ let mut data = match n {
+ Cow::Borrowed(n) => n.data[digits..].to_vec(),
+ Cow::Owned(mut n) => {
+ n.data.drain(..digits);
+ n.data
+ }
+ };
+
+ if shift > 0 {
+ let mut borrow = 0;
+ let borrow_shift = big_digit::BITS as u8 - shift;
+ for elem in data.iter_mut().rev() {
+ let new_borrow = *elem << borrow_shift;
+ *elem = (*elem >> shift) | borrow;
+ borrow = new_borrow;
+ }
+ }
+
+ biguint_from_vec(data)
+}
+
+macro_rules! impl_shift {
+ (@ref $Shx:ident :: $shx:ident, $ShxAssign:ident :: $shx_assign:ident, $rhs:ty) => {
+ impl<'b> $Shx<&'b $rhs> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn $shx(self, rhs: &'b $rhs) -> BigUint {
+ $Shx::$shx(self, *rhs)
+ }
+ }
+ impl<'a, 'b> $Shx<&'b $rhs> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn $shx(self, rhs: &'b $rhs) -> BigUint {
+ $Shx::$shx(self, *rhs)
+ }
+ }
+ impl<'b> $ShxAssign<&'b $rhs> for BigUint {
+ #[inline]
+ fn $shx_assign(&mut self, rhs: &'b $rhs) {
+ $ShxAssign::$shx_assign(self, *rhs);
+ }
+ }
+ };
+ ($($rhs:ty),+) => {$(
+ impl Shl<$rhs> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shl(self, rhs: $rhs) -> BigUint {
+ biguint_shl(Cow::Owned(self), rhs)
+ }
+ }
+ impl<'a> Shl<$rhs> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shl(self, rhs: $rhs) -> BigUint {
+ biguint_shl(Cow::Borrowed(self), rhs)
+ }
+ }
+ impl ShlAssign<$rhs> for BigUint {
+ #[inline]
+ fn shl_assign(&mut self, rhs: $rhs) {
+ let n = mem::replace(self, BigUint::zero());
+ *self = n << rhs;
+ }
+ }
+ impl_shift! { @ref Shl::shl, ShlAssign::shl_assign, $rhs }
+
+ impl Shr<$rhs> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shr(self, rhs: $rhs) -> BigUint {
+ biguint_shr(Cow::Owned(self), rhs)
+ }
+ }
+ impl<'a> Shr<$rhs> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shr(self, rhs: $rhs) -> BigUint {
+ biguint_shr(Cow::Borrowed(self), rhs)
+ }
+ }
+ impl ShrAssign<$rhs> for BigUint {
+ #[inline]
+ fn shr_assign(&mut self, rhs: $rhs) {
+ let n = mem::replace(self, BigUint::zero());
+ *self = n >> rhs;
+ }
+ }
+ impl_shift! { @ref Shr::shr, ShrAssign::shr_assign, $rhs }
+ )*};
+}
+
+impl_shift! { u8, u16, u32, u64, u128, usize }
+impl_shift! { i8, i16, i32, i64, i128, isize }
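+
+// Illustrative check (editorial annotation, not part of the upstream import): a left
+// shift followed by an equal right shift round-trips when no set bits are shifted out.
+#[test]
+fn test_shift_round_trip() {
+ use num_traits::One;
+
+ let x = BigUint::one() << 100u32;
+ assert_eq!(&x >> 100u32, BigUint::one());
+ assert_eq!((&x >> 1u8) << 1u8, x);
+}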
diff --git a/src/biguint/subtraction.rs b/src/biguint/subtraction.rs
new file mode 100644
index 0000000..6700517
--- /dev/null
+++ b/src/biguint/subtraction.rs
@@ -0,0 +1,312 @@
+#[cfg(not(u64_digit))]
+use super::u32_from_u128;
+use super::BigUint;
+
+use crate::big_digit::{self, BigDigit};
+use crate::UsizePromotion;
+
+use core::cmp::Ordering::{Equal, Greater, Less};
+use core::ops::{Sub, SubAssign};
+use num_traits::{CheckedSub, Zero};
+
+#[cfg(all(use_addcarry, target_arch = "x86_64"))]
+use core::arch::x86_64 as arch;
+
+#[cfg(all(use_addcarry, target_arch = "x86"))]
+use core::arch::x86 as arch;
+
+// Subtract with borrow:
+#[cfg(all(use_addcarry, u64_digit))]
+#[inline]
+fn sbb(borrow: u8, a: u64, b: u64, out: &mut u64) -> u8 {
+ // Safety: There are absolutely no safety concerns with calling `_subborrow_u64`.
+ // It's just unsafe for API consistency with other intrinsics.
+ unsafe { arch::_subborrow_u64(borrow, a, b, out) }
+}
+
+#[cfg(all(use_addcarry, not(u64_digit)))]
+#[inline]
+fn sbb(borrow: u8, a: u32, b: u32, out: &mut u32) -> u8 {
+ // Safety: There are absolutely no safety concerns with calling `_subborrow_u32`.
+ // It's just unsafe for API consistency with other intrinsics.
+ unsafe { arch::_subborrow_u32(borrow, a, b, out) }
+}
+
+// fallback for environments where we don't have a subborrow intrinsic
+#[cfg(not(use_addcarry))]
+#[inline]
+fn sbb(borrow: u8, a: BigDigit, b: BigDigit, out: &mut BigDigit) -> u8 {
+ use crate::big_digit::SignedDoubleBigDigit;
+
+ let difference = SignedDoubleBigDigit::from(a)
+ - SignedDoubleBigDigit::from(b)
+ - SignedDoubleBigDigit::from(borrow);
+ *out = difference as BigDigit;
+ u8::from(difference < 0)
+}
+
+pub(super) fn sub2(a: &mut [BigDigit], b: &[BigDigit]) {
+ let mut borrow = 0;
+
+ let len = Ord::min(a.len(), b.len());
+ let (a_lo, a_hi) = a.split_at_mut(len);
+ let (b_lo, b_hi) = b.split_at(len);
+
+ for (a, b) in a_lo.iter_mut().zip(b_lo) {
+ borrow = sbb(borrow, *a, *b, a);
+ }
+
+ if borrow != 0 {
+ for a in a_hi {
+ borrow = sbb(borrow, *a, 0, a);
+ if borrow == 0 {
+ break;
+ }
+ }
+ }
+
+ // note: we're _required_ to fail on underflow
+ assert!(
+ borrow == 0 && b_hi.iter().all(|x| *x == 0),
+ "Cannot subtract b from a because b is larger than a."
+ );
+}
+
+// Only for the Sub impl. `a` and `b` must have same length.
+#[inline]
+fn __sub2rev(a: &[BigDigit], b: &mut [BigDigit]) -> u8 {
+ debug_assert!(b.len() == a.len());
+
+ let mut borrow = 0;
+
+ for (ai, bi) in a.iter().zip(b) {
+ borrow = sbb(borrow, *ai, *bi, bi);
+ }
+
+ borrow
+}
+
+fn sub2rev(a: &[BigDigit], b: &mut [BigDigit]) {
+ debug_assert!(b.len() >= a.len());
+
+ let len = Ord::min(a.len(), b.len());
+ let (a_lo, a_hi) = a.split_at(len);
+ let (b_lo, b_hi) = b.split_at_mut(len);
+
+ let borrow = __sub2rev(a_lo, b_lo);
+
+ assert!(a_hi.is_empty());
+
+ // note: we're _required_ to fail on underflow
+ assert!(
+ borrow == 0 && b_hi.iter().all(|x| *x == 0),
+ "Cannot subtract b from a because b is larger than a."
+ );
+}
+
+forward_val_val_binop!(impl Sub for BigUint, sub);
+forward_ref_ref_binop!(impl Sub for BigUint, sub);
+forward_val_assign!(impl SubAssign for BigUint, sub_assign);
+
+impl<'a> Sub<&'a BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn sub(mut self, other: &BigUint) -> BigUint {
+ self -= other;
+ self
+ }
+}
+impl<'a> SubAssign<&'a BigUint> for BigUint {
+ fn sub_assign(&mut self, other: &'a BigUint) {
+ sub2(&mut self.data[..], &other.data[..]);
+ self.normalize();
+ }
+}
+
+impl<'a> Sub<BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ fn sub(self, mut other: BigUint) -> BigUint {
+ let other_len = other.data.len();
+ if other_len < self.data.len() {
+ let lo_borrow = __sub2rev(&self.data[..other_len], &mut other.data);
+ other.data.extend_from_slice(&self.data[other_len..]);
+ if lo_borrow != 0 {
+ sub2(&mut other.data[other_len..], &[1])
+ }
+ } else {
+ sub2rev(&self.data[..], &mut other.data[..]);
+ }
+ other.normalized()
+ }
+}
+
+promote_unsigned_scalars!(impl Sub for BigUint, sub);
+promote_unsigned_scalars_assign!(impl SubAssign for BigUint, sub_assign);
+forward_all_scalar_binop_to_val_val!(impl Sub<u32> for BigUint, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<u64> for BigUint, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<u128> for BigUint, sub);
+
+impl Sub<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(mut self, other: u32) -> BigUint {
+ self -= other;
+ self
+ }
+}
+
+impl SubAssign<u32> for BigUint {
+ fn sub_assign(&mut self, other: u32) {
+ sub2(&mut self.data[..], &[other as BigDigit]);
+ self.normalize();
+ }
+}
+
+impl Sub<BigUint> for u32 {
+ type Output = BigUint;
+
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ if other.data.is_empty() {
+ other.data.push(self);
+ } else {
+ sub2rev(&[self], &mut other.data[..]);
+ }
+ other.normalized()
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ if other.data.is_empty() {
+ other.data.push(self as BigDigit);
+ } else {
+ sub2rev(&[self as BigDigit], &mut other.data[..]);
+ }
+ other.normalized()
+ }
+}
+
+impl Sub<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(mut self, other: u64) -> BigUint {
+ self -= other;
+ self
+ }
+}
+
+impl SubAssign<u64> for BigUint {
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn sub_assign(&mut self, other: u64) {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ sub2(&mut self.data[..], &[lo, hi]);
+ self.normalize();
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn sub_assign(&mut self, other: u64) {
+ sub2(&mut self.data[..], &[other as BigDigit]);
+ self.normalize();
+ }
+}
+
+impl Sub<BigUint> for u64 {
+ type Output = BigUint;
+
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ while other.data.len() < 2 {
+ other.data.push(0);
+ }
+
+ let (hi, lo) = big_digit::from_doublebigdigit(self);
+ sub2rev(&[lo, hi], &mut other.data[..]);
+ other.normalized()
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ if other.data.is_empty() {
+ other.data.push(self);
+ } else {
+ sub2rev(&[self], &mut other.data[..]);
+ }
+ other.normalized()
+ }
+}
+
+impl Sub<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(mut self, other: u128) -> BigUint {
+ self -= other;
+ self
+ }
+}
+
+impl SubAssign<u128> for BigUint {
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn sub_assign(&mut self, other: u128) {
+ let (a, b, c, d) = u32_from_u128(other);
+ sub2(&mut self.data[..], &[d, c, b, a]);
+ self.normalize();
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn sub_assign(&mut self, other: u128) {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ sub2(&mut self.data[..], &[lo, hi]);
+ self.normalize();
+ }
+}
+
+impl Sub<BigUint> for u128 {
+ type Output = BigUint;
+
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ while other.data.len() < 4 {
+ other.data.push(0);
+ }
+
+ let (a, b, c, d) = u32_from_u128(self);
+ sub2rev(&[d, c, b, a], &mut other.data[..]);
+ other.normalized()
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ while other.data.len() < 2 {
+ other.data.push(0);
+ }
+
+ let (hi, lo) = big_digit::from_doublebigdigit(self);
+ sub2rev(&[lo, hi], &mut other.data[..]);
+ other.normalized()
+ }
+}
+
+impl CheckedSub for BigUint {
+ #[inline]
+ fn checked_sub(&self, v: &BigUint) -> Option<BigUint> {
+ match self.cmp(v) {
+ Less => None,
+ Equal => Some(Zero::zero()),
+ Greater => Some(self.sub(v)),
+ }
+ }
+}
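+
+// Illustrative check (editorial annotation, not part of the upstream import): unlike
+// `Sub`, which panics on underflow, `CheckedSub` reports underflow as `None`.
+#[test]
+fn test_checked_sub_underflow() {
+ let a = BigUint::from(3u32);
+ let b = BigUint::from(5u32);
+ assert_eq!(b.checked_sub(&a), Some(BigUint::from(2u32)));
+ assert_eq!(a.checked_sub(&b), None);
+}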
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..b88c5df
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,294 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A Big integer (signed version: `BigInt`, unsigned version: `BigUint`).
+//!
+//! A `BigUint` is represented as a vector of `BigDigit`s.
+//! A `BigInt` is a combination of `BigUint` and `Sign`.
+//!
+//! Common numerical operations are overloaded, so we can treat them
+//! the same way we treat other numbers.
+//!
+//! ## Example
+//!
+//! ```rust
+//! # fn main() {
+//! use num_bigint::BigUint;
+//! use num_traits::{Zero, One};
+//! use std::mem::replace;
+//!
+//! // Calculate large fibonacci numbers.
+//! fn fib(n: usize) -> BigUint {
+//! let mut f0: BigUint = Zero::zero();
+//! let mut f1: BigUint = One::one();
+//! for _ in 0..n {
+//! let f2 = f0 + &f1;
+//! // This is a low cost way of swapping f0 with f1 and f1 with f2.
+//! f0 = replace(&mut f1, f2);
+//! }
+//! f0
+//! }
+//!
+//! // This is a very large number.
+//! println!("fib(1000) = {}", fib(1000));
+//! # }
+//! ```
+//!
+//! It's easy to generate large random numbers:
+//!
+//! ```rust,ignore
+//! use num_bigint::{ToBigInt, RandBigInt};
+//!
+//! let mut rng = rand::thread_rng();
+//! let a = rng.gen_bigint(1000);
+//!
+//! let low = -10000.to_bigint().unwrap();
+//! let high = 10000.to_bigint().unwrap();
+//! let b = rng.gen_bigint_range(&low, &high);
+//!
+//! // Probably an even larger number.
+//! println!("{}", a * b);
+//! ```
+//!
+//! See the "Features" section for instructions for enabling random number generation.
+//!
+//! ## Features
+//!
+//! The `std` crate feature is enabled by default, and is mandatory before Rust
+//! 1.36, when the `alloc` crate was stabilized. If you depend on `num-bigint` with
+//! `default-features = false`, you must manually enable the `std` feature yourself
+//! if your compiler is not new enough.
+//!
+//! ### Random Generation
+//!
+//! `num-bigint` supports the generation of random big integers when the `rand`
+//! feature is enabled. To enable it, include `rand` as
+//!
+//! ```toml
+//! rand = "0.8"
+//! num-bigint = { version = "0.4", features = ["rand"] }
+//! ```
+//!
+//! Note that you must use the version of `rand` that `num-bigint` is compatible
+//! with: `0.8`.
+//!
+//!
+//! ## Compatibility
+//!
+//! The `num-bigint` crate is tested for rustc 1.31 and greater.
+
+#![doc(html_root_url = "https://docs.rs/num-bigint/0.4")]
+#![warn(rust_2018_idioms)]
+#![no_std]
+
+#[cfg(feature = "std")]
+#[macro_use]
+extern crate std;
+
+#[cfg(feature = "std")]
+mod std_alloc {
+ pub(crate) use std::borrow::Cow;
+ #[cfg(any(feature = "quickcheck"))]
+ pub(crate) use std::boxed::Box;
+ pub(crate) use std::string::String;
+ pub(crate) use std::vec::Vec;
+}
+
+#[cfg(not(feature = "std"))]
+#[macro_use]
+extern crate alloc;
+
+#[cfg(not(feature = "std"))]
+mod std_alloc {
+ pub(crate) use alloc::borrow::Cow;
+ #[cfg(any(feature = "quickcheck"))]
+ pub(crate) use alloc::boxed::Box;
+ pub(crate) use alloc::string::String;
+ pub(crate) use alloc::vec::Vec;
+}
+
+use core::fmt;
+#[cfg(feature = "std")]
+use std::error::Error;
+
+#[macro_use]
+mod macros;
+
+mod bigint;
+mod biguint;
+
+#[cfg(feature = "rand")]
+mod bigrand;
+
+#[cfg(target_pointer_width = "32")]
+type UsizePromotion = u32;
+#[cfg(target_pointer_width = "64")]
+type UsizePromotion = u64;
+
+#[cfg(target_pointer_width = "32")]
+type IsizePromotion = i32;
+#[cfg(target_pointer_width = "64")]
+type IsizePromotion = i64;
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct ParseBigIntError {
+ kind: BigIntErrorKind,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+enum BigIntErrorKind {
+ Empty,
+ InvalidDigit,
+}
+
+impl ParseBigIntError {
+ fn __description(&self) -> &str {
+ use crate::BigIntErrorKind::*;
+ match self.kind {
+ Empty => "cannot parse integer from empty string",
+ InvalidDigit => "invalid digit found in string",
+ }
+ }
+
+ fn empty() -> Self {
+ ParseBigIntError {
+ kind: BigIntErrorKind::Empty,
+ }
+ }
+
+ fn invalid() -> Self {
+ ParseBigIntError {
+ kind: BigIntErrorKind::InvalidDigit,
+ }
+ }
+}
+
+impl fmt::Display for ParseBigIntError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.__description().fmt(f)
+ }
+}
+
+#[cfg(feature = "std")]
+impl Error for ParseBigIntError {
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
+
+/// The error type returned when a checked conversion regarding big integer fails.
+#[cfg(has_try_from)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub struct TryFromBigIntError<T> {
+ original: T,
+}
+
+#[cfg(has_try_from)]
+impl<T> TryFromBigIntError<T> {
+ fn new(original: T) -> Self {
+ TryFromBigIntError { original }
+ }
+
+ fn __description(&self) -> &str {
+ "out of range conversion regarding big integer attempted"
+ }
+
+ /// Extract the original value, if available. The value will be available
+ /// if the type before conversion was either [`BigInt`] or [`BigUint`].
+ ///
+ /// [`BigInt`]: struct.BigInt.html
+ /// [`BigUint`]: struct.BigUint.html
+ pub fn into_original(self) -> T {
+ self.original
+ }
+}
+
+#[cfg(all(feature = "std", has_try_from))]
+impl<T> std::error::Error for TryFromBigIntError<T>
+where
+ T: fmt::Debug,
+{
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
+
+#[cfg(has_try_from)]
+impl<T> fmt::Display for TryFromBigIntError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.__description().fmt(f)
+ }
+}
+
+pub use crate::biguint::BigUint;
+pub use crate::biguint::ToBigUint;
+pub use crate::biguint::U32Digits;
+pub use crate::biguint::U64Digits;
+
+pub use crate::bigint::BigInt;
+pub use crate::bigint::Sign;
+pub use crate::bigint::ToBigInt;
+
+#[cfg(feature = "rand")]
+pub use crate::bigrand::{RandBigInt, RandomBits, UniformBigInt, UniformBigUint};
+
+mod big_digit {
+ /// A `BigDigit` is a `BigUint`'s composing element.
+ #[cfg(not(u64_digit))]
+ pub(crate) type BigDigit = u32;
+ #[cfg(u64_digit)]
+ pub(crate) type BigDigit = u64;
+
+ /// A `DoubleBigDigit` is the internal type used to do the computations. Its
+ /// size is the double of the size of `BigDigit`.
+ #[cfg(not(u64_digit))]
+ pub(crate) type DoubleBigDigit = u64;
+ #[cfg(u64_digit)]
+ pub(crate) type DoubleBigDigit = u128;
+
+ /// A `SignedDoubleBigDigit` is the signed version of `DoubleBigDigit`.
+ #[cfg(not(u64_digit))]
+ pub(crate) type SignedDoubleBigDigit = i64;
+ #[cfg(u64_digit)]
+ pub(crate) type SignedDoubleBigDigit = i128;
+
+ // `DoubleBigDigit` size dependent
+ #[cfg(not(u64_digit))]
+ pub(crate) const BITS: u8 = 32;
+ #[cfg(u64_digit)]
+ pub(crate) const BITS: u8 = 64;
+
+ pub(crate) const HALF_BITS: u8 = BITS / 2;
+ pub(crate) const HALF: BigDigit = (1 << HALF_BITS) - 1;
+
+ const LO_MASK: DoubleBigDigit = (1 << BITS) - 1;
+ pub(crate) const MAX: BigDigit = LO_MASK as BigDigit;
+
+ #[inline]
+ fn get_hi(n: DoubleBigDigit) -> BigDigit {
+ (n >> BITS) as BigDigit
+ }
+ #[inline]
+ fn get_lo(n: DoubleBigDigit) -> BigDigit {
+ (n & LO_MASK) as BigDigit
+ }
+
+ /// Split one `DoubleBigDigit` into two `BigDigit`s.
+ #[inline]
+ pub(crate) fn from_doublebigdigit(n: DoubleBigDigit) -> (BigDigit, BigDigit) {
+ (get_hi(n), get_lo(n))
+ }
+
+ /// Join two `BigDigit`s into one `DoubleBigDigit`
+ #[inline]
+ pub(crate) fn to_doublebigdigit(hi: BigDigit, lo: BigDigit) -> DoubleBigDigit {
+ DoubleBigDigit::from(lo) | (DoubleBigDigit::from(hi) << BITS)
+ }
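+
+ // Illustrative round-trip check (editorial annotation, not part of the upstream
+ // import): splitting and re-joining a double digit is lossless.
+ #[test]
+ fn test_doublebigdigit_round_trip() {
+ for &(hi, lo) in &[(0, 0), (1, 2), (MAX, MAX)] {
+ assert_eq!(from_doublebigdigit(to_doublebigdigit(hi, lo)), (hi, lo));
+ }
+ }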
+}
diff --git a/src/macros.rs b/src/macros.rs
new file mode 100644
index 0000000..a03cb67
--- /dev/null
+++ b/src/macros.rs
@@ -0,0 +1,441 @@
+#![allow(unused_macros)]
+
+macro_rules! forward_val_val_binop {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl $imp<$res> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ // forward to val-ref
+ $imp::$method(self, &other)
+ }
+ }
+ };
+}
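+
+// For example (editorial annotation): `forward_val_val_binop!(impl Sub for BigUint, sub)`
+// expands to an `impl Sub<BigUint> for BigUint` whose `sub` simply calls the val-ref
+// implementation with `&other`, so only one "real" implementation has to be written.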
+
+macro_rules! forward_val_val_binop_commutative {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl $imp<$res> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ // forward to val-ref, with the larger capacity as val
+ if self.capacity() >= other.capacity() {
+ $imp::$method(self, &other)
+ } else {
+ $imp::$method(other, &self)
+ }
+ }
+ }
+ };
+}
+
+macro_rules! forward_ref_val_binop {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl<'a> $imp<$res> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ // forward to ref-ref
+ $imp::$method(self, &other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_ref_val_binop_commutative {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl<'a> $imp<$res> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ // reverse, forward to val-ref
+ $imp::$method(other, self)
+ }
+ }
+ };
+}
+
+macro_rules! forward_val_ref_binop {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl<'a> $imp<&'a $res> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ // forward to ref-ref
+ $imp::$method(&self, other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_ref_ref_binop {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl<'a, 'b> $imp<&'b $res> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ // forward to val-ref
+ $imp::$method(self.clone(), other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_ref_ref_binop_commutative {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl<'a, 'b> $imp<&'b $res> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ // forward to val-ref, choosing the larger to clone
+ if self.len() >= other.len() {
+ $imp::$method(self.clone(), other)
+ } else {
+ $imp::$method(other.clone(), self)
+ }
+ }
+ }
+ };
+}
+
+macro_rules! forward_val_assign {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl $imp<$res> for $res {
+ #[inline]
+ fn $method(&mut self, other: $res) {
+ self.$method(&other);
+ }
+ }
+ };
+}
+
+macro_rules! forward_val_assign_scalar {
+ (impl $imp:ident for $res:ty, $scalar:ty, $method:ident) => {
+ impl $imp<$res> for $scalar {
+ #[inline]
+ fn $method(&mut self, other: $res) {
+ self.$method(&other);
+ }
+ }
+ };
+}
+
+/// use this if val_val_binop is already implemented and the reversed order is required
+macro_rules! forward_scalar_val_val_binop_commutative {
+ (impl $imp:ident < $scalar:ty > for $res:ty, $method:ident) => {
+ impl $imp<$res> for $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(other, self)
+ }
+ }
+ };
+}
+
+// Forward scalar to ref-val, when reusing storage is not helpful
+macro_rules! forward_scalar_val_val_binop_to_ref_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl $imp<$scalar> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $scalar) -> $res {
+ $imp::$method(&self, other)
+ }
+ }
+
+ impl $imp<$res> for $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(self, &other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_scalar_ref_ref_binop_to_ref_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl<'a, 'b> $imp<&'b $scalar> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$scalar) -> $res {
+ $imp::$method(self, *other)
+ }
+ }
+
+ impl<'a, 'b> $imp<&'a $res> for &'b $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ $imp::$method(*self, other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_scalar_val_ref_binop_to_ref_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl<'a> $imp<&'a $scalar> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$scalar) -> $res {
+ $imp::$method(&self, *other)
+ }
+ }
+
+ impl<'a> $imp<$res> for &'a $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(*self, &other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_scalar_val_ref_binop_to_val_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl<'a> $imp<&'a $scalar> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$scalar) -> $res {
+ $imp::$method(self, *other)
+ }
+ }
+
+ impl<'a> $imp<$res> for &'a $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(*self, other)
+ }
+ }
+ };
+}
+
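+// Forward scalar ref-val to val-val by cloning the big-integer operand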
+macro_rules! forward_scalar_ref_val_binop_to_val_val {
+ (impl $imp:ident < $scalar:ty > for $res:ty, $method:ident) => {
+ impl<'a> $imp<$scalar> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $scalar) -> $res {
+ $imp::$method(self.clone(), other)
+ }
+ }
+
+ impl<'a> $imp<&'a $res> for $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ $imp::$method(self, other.clone())
+ }
+ }
+ };
+}
+
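+// Forward scalar ref-ref to val-val by cloning the big-integer operand and copying the scalar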
+macro_rules! forward_scalar_ref_ref_binop_to_val_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl<'a, 'b> $imp<&'b $scalar> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$scalar) -> $res {
+ $imp::$method(self.clone(), *other)
+ }
+ }
+
+ impl<'a, 'b> $imp<&'a $res> for &'b $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ $imp::$method(*self, other.clone())
+ }
+ }
+ };
+}
+
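+// Implement the binop for each smaller scalar type by widening it to the promoted type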
+macro_rules! promote_scalars {
+ (impl $imp:ident<$promo:ty> for $res:ty, $method:ident, $( $scalar:ty ),*) => {
+ $(
+ forward_all_scalar_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+
+ impl $imp<$scalar> for $res {
+ type Output = $res;
+
+ #[allow(clippy::cast_lossless)]
+ #[inline]
+ fn $method(self, other: $scalar) -> $res {
+ $imp::$method(self, other as $promo)
+ }
+ }
+
+ impl $imp<$res> for $scalar {
+ type Output = $res;
+
+ #[allow(clippy::cast_lossless)]
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(self as $promo, other)
+ }
+ }
+ )*
+ }
+}
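+
+// Implement the assign op for each smaller scalar type by widening it to the promoted type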
+macro_rules! promote_scalars_assign {
+ (impl $imp:ident<$promo:ty> for $res:ty, $method:ident, $( $scalar:ty ),*) => {
+ $(
+ impl $imp<$scalar> for $res {
+ #[allow(clippy::cast_lossless)]
+ #[inline]
+ fn $method(&mut self, other: $scalar) {
+ self.$method(other as $promo);
+ }
+ }
+ )*
+ }
+}
+
+macro_rules! promote_unsigned_scalars {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_scalars!(impl $imp<u32> for $res, $method, u8, u16);
+ promote_scalars!(impl $imp<UsizePromotion> for $res, $method, usize);
+ }
+}
+
+macro_rules! promote_unsigned_scalars_assign {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_scalars_assign!(impl $imp<u32> for $res, $method, u8, u16);
+ promote_scalars_assign!(impl $imp<UsizePromotion> for $res, $method, usize);
+ }
+}
+
+macro_rules! promote_signed_scalars {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_scalars!(impl $imp<i32> for $res, $method, i8, i16);
+ promote_scalars!(impl $imp<IsizePromotion> for $res, $method, isize);
+ }
+}
+
+macro_rules! promote_signed_scalars_assign {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_scalars_assign!(impl $imp<i32> for $res, $method, i8, i16);
+ promote_scalars_assign!(impl $imp<IsizePromotion> for $res, $method, isize);
+ }
+}
+
+// Forward everything to ref-ref, when reusing storage is not helpful
+macro_rules! forward_all_binop_to_ref_ref {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ forward_val_val_binop!(impl $imp for $res, $method);
+ forward_val_ref_binop!(impl $imp for $res, $method);
+ forward_ref_val_binop!(impl $imp for $res, $method);
+ };
+}
+
+// Forward everything to val-ref, so LHS storage can be reused
+macro_rules! forward_all_binop_to_val_ref {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ forward_val_val_binop!(impl $imp for $res, $method);
+ forward_ref_val_binop!(impl $imp for $res, $method);
+ forward_ref_ref_binop!(impl $imp for $res, $method);
+ };
+}
+
+// Forward everything to val-ref, commutatively, so either LHS or RHS storage can be reused
+macro_rules! forward_all_binop_to_val_ref_commutative {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ forward_val_val_binop_commutative!(impl $imp for $res, $method);
+ forward_ref_val_binop_commutative!(impl $imp for $res, $method);
+ forward_ref_ref_binop_commutative!(impl $imp for $res, $method);
+ };
+}
+
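+// Forward all scalar binop variants to the ref-val implementation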
+macro_rules! forward_all_scalar_binop_to_ref_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ forward_scalar_val_val_binop_to_ref_val!(impl $imp<$scalar> for $res, $method);
+ forward_scalar_val_ref_binop_to_ref_val!(impl $imp<$scalar> for $res, $method);
+ forward_scalar_ref_ref_binop_to_ref_val!(impl $imp<$scalar> for $res, $method);
+ }
+}
+
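+// Forward all scalar binop variants to the val-val implementation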
+macro_rules! forward_all_scalar_binop_to_val_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ forward_scalar_val_ref_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+ forward_scalar_ref_val_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+ forward_scalar_ref_ref_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+ }
+}
+
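+// As above, also deriving the owned scalar-on-the-left impl by swapping the operands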
+macro_rules! forward_all_scalar_binop_to_val_val_commutative {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ forward_scalar_val_val_binop_commutative!(impl $imp<$scalar> for $res, $method);
+ forward_all_scalar_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+ }
+}
+
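+// Promote both the unsigned and signed scalar families for the binop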
+macro_rules! promote_all_scalars {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_unsigned_scalars!(impl $imp for $res, $method);
+ promote_signed_scalars!(impl $imp for $res, $method);
+ }
+}
+
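+// Promote both the unsigned and signed scalar families for the assign op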
+macro_rules! promote_all_scalars_assign {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_unsigned_scalars_assign!(impl $imp for $res, $method);
+ promote_signed_scalars_assign!(impl $imp for $res, $method);
+ }
+}
+
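+// Implement Sum<T> by folding the iterator with Add, starting from zero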
+macro_rules! impl_sum_iter_type {
+ ($res:ty) => {
+ impl<T> Sum<T> for $res
+ where
+ $res: Add<T, Output = $res>,
+ {
+ fn sum<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = T>,
+ {
+ iter.fold(Zero::zero(), <$res>::add)
+ }
+ }
+ };
+}
+
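+// Implement Product<T> by folding the iterator with Mul, starting from one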
+macro_rules! impl_product_iter_type {
+ ($res:ty) => {
+ impl<T> Product<T> for $res
+ where
+ $res: Mul<T, Output = $res>,
+ {
+ fn product<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = T>,
+ {
+ iter.fold(One::one(), <$res>::mul)
+ }
+ }
+ };
+}
diff --git a/tests/bigint.rs b/tests/bigint.rs
new file mode 100644
index 0000000..8eff5ba
--- /dev/null
+++ b/tests/bigint.rs
@@ -0,0 +1,1402 @@
+use num_bigint::BigUint;
+use num_bigint::Sign::{Minus, NoSign, Plus};
+use num_bigint::{BigInt, ToBigInt};
+
+use std::cmp::Ordering::{Equal, Greater, Less};
+use std::collections::hash_map::RandomState;
+use std::hash::{BuildHasher, Hash, Hasher};
+use std::iter::repeat;
+use std::ops::Neg;
+use std::{f32, f64};
+use std::{i128, u128};
+use std::{i16, i32, i64, i8, isize};
+use std::{u16, u32, u64, u8, usize};
+
+use num_integer::Integer;
+use num_traits::{pow, FromPrimitive, Num, One, Pow, Signed, ToPrimitive, Zero};
+
+mod consts;
+use crate::consts::*;
+
+#[macro_use]
+mod macros;
+
+#[test]
+fn test_from_bytes_be() {
+ fn check(s: &str, result: &str) {
+ assert_eq!(
+ BigInt::from_bytes_be(Plus, s.as_bytes()),
+ BigInt::parse_bytes(result.as_bytes(), 10).unwrap()
+ );
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("AB", "16706");
+ check("Hello world!", "22405534230753963835153736737");
+ assert_eq!(BigInt::from_bytes_be(Plus, &[]), BigInt::zero());
+ assert_eq!(BigInt::from_bytes_be(Minus, &[]), BigInt::zero());
+}
+
+#[test]
+fn test_to_bytes_be() {
+ fn check(s: &str, result: &str) {
+ let b = BigInt::parse_bytes(result.as_bytes(), 10).unwrap();
+ let (sign, v) = b.to_bytes_be();
+ assert_eq!((Plus, s.as_bytes()), (sign, &*v));
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("AB", "16706");
+ check("Hello world!", "22405534230753963835153736737");
+ let b: BigInt = Zero::zero();
+ assert_eq!(b.to_bytes_be(), (NoSign, vec![0]));
+
+ // Test with leading/trailing zero bytes and a full BigDigit of value 0
+ let b = BigInt::from_str_radix("00010000000000000200", 16).unwrap();
+ assert_eq!(b.to_bytes_be(), (Plus, vec![1, 0, 0, 0, 0, 0, 0, 2, 0]));
+}
+
+#[test]
+fn test_from_bytes_le() {
+ fn check(s: &str, result: &str) {
+ assert_eq!(
+ BigInt::from_bytes_le(Plus, s.as_bytes()),
+ BigInt::parse_bytes(result.as_bytes(), 10).unwrap()
+ );
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("BA", "16706");
+ check("!dlrow olleH", "22405534230753963835153736737");
+ assert_eq!(BigInt::from_bytes_le(Plus, &[]), BigInt::zero());
+ assert_eq!(BigInt::from_bytes_le(Minus, &[]), BigInt::zero());
+}
+
+#[test]
+fn test_to_bytes_le() {
+ fn check(s: &str, result: &str) {
+ let b = BigInt::parse_bytes(result.as_bytes(), 10).unwrap();
+ let (sign, v) = b.to_bytes_le();
+ assert_eq!((Plus, s.as_bytes()), (sign, &*v));
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("BA", "16706");
+ check("!dlrow olleH", "22405534230753963835153736737");
+ let b: BigInt = Zero::zero();
+ assert_eq!(b.to_bytes_le(), (NoSign, vec![0]));
+
+ // Test with leading/trailing zero bytes and a full BigDigit of value 0
+ let b = BigInt::from_str_radix("00010000000000000200", 16).unwrap();
+ assert_eq!(b.to_bytes_le(), (Plus, vec![0, 2, 0, 0, 0, 0, 0, 0, 1]));
+}
+
+#[test]
+fn test_to_signed_bytes_le() {
+ fn check(s: &str, result: Vec<u8>) {
+ assert_eq!(
+ BigInt::parse_bytes(s.as_bytes(), 10)
+ .unwrap()
+ .to_signed_bytes_le(),
+ result
+ );
+ }
+
+ check("0", vec![0]);
+ check("32767", vec![0xff, 0x7f]);
+ check("-1", vec![0xff]);
+ check("16777216", vec![0, 0, 0, 1]);
+ check("-100", vec![156]);
+ check("-8388608", vec![0, 0, 0x80]);
+ check("-192", vec![0x40, 0xff]);
+ check("128", vec![0x80, 0])
+}
+
+#[test]
+fn test_from_signed_bytes_le() {
+ fn check(s: &[u8], result: &str) {
+ assert_eq!(
+ BigInt::from_signed_bytes_le(s),
+ BigInt::parse_bytes(result.as_bytes(), 10).unwrap()
+ );
+ }
+
+ check(&[], "0");
+ check(&[0], "0");
+ check(&[0; 10], "0");
+ check(&[0xff, 0x7f], "32767");
+ check(&[0xff], "-1");
+ check(&[0, 0, 0, 1], "16777216");
+ check(&[156], "-100");
+ check(&[0, 0, 0x80], "-8388608");
+ check(&[0xff; 10], "-1");
+ check(&[0x40, 0xff], "-192");
+}
+
+#[test]
+fn test_to_signed_bytes_be() {
+ fn check(s: &str, result: Vec<u8>) {
+ assert_eq!(
+ BigInt::parse_bytes(s.as_bytes(), 10)
+ .unwrap()
+ .to_signed_bytes_be(),
+ result
+ );
+ }
+
+ check("0", vec![0]);
+ check("32767", vec![0x7f, 0xff]);
+ check("-1", vec![255]);
+ check("16777216", vec![1, 0, 0, 0]);
+ check("-100", vec![156]);
+ check("-8388608", vec![128, 0, 0]);
+ check("-192", vec![0xff, 0x40]);
+ check("128", vec![0, 0x80]);
+}
+
+#[test]
+fn test_from_signed_bytes_be() {
+ fn check(s: &[u8], result: &str) {
+ assert_eq!(
+ BigInt::from_signed_bytes_be(s),
+ BigInt::parse_bytes(result.as_bytes(), 10).unwrap()
+ );
+ }
+
+ check(&[], "0");
+ check(&[0], "0");
+ check(&[0; 10], "0");
+ check(&[127, 255], "32767");
+ check(&[255], "-1");
+ check(&[1, 0, 0, 0], "16777216");
+ check(&[156], "-100");
+ check(&[128, 0, 0], "-8388608");
+ check(&[255; 10], "-1");
+ check(&[0xff, 0x40], "-192");
+}
+
+#[test]
+fn test_signed_bytes_be_round_trip() {
+ for i in -0x1FFFF..0x20000 {
+ let n = BigInt::from(i);
+ assert_eq!(n, BigInt::from_signed_bytes_be(&n.to_signed_bytes_be()));
+ }
+}
+
+#[test]
+fn test_signed_bytes_le_round_trip() {
+ for i in -0x1FFFF..0x20000 {
+ let n = BigInt::from(i);
+ assert_eq!(n, BigInt::from_signed_bytes_le(&n.to_signed_bytes_le()));
+ }
+}
+
+#[test]
+fn test_cmp() {
+ let vs: [&[u32]; 4] = [&[2 as u32], &[1, 1], &[2, 1], &[1, 1, 1]];
+ let mut nums = Vec::new();
+ for s in vs.iter().rev() {
+ nums.push(BigInt::from_slice(Minus, *s));
+ }
+ nums.push(Zero::zero());
+ nums.extend(vs.iter().map(|s| BigInt::from_slice(Plus, *s)));
+
+ for (i, ni) in nums.iter().enumerate() {
+ for (j0, nj) in nums[i..].iter().enumerate() {
+ let j = i + j0;
+ if i == j {
+ assert_eq!(ni.cmp(nj), Equal);
+ assert_eq!(nj.cmp(ni), Equal);
+ assert_eq!(ni, nj);
+ assert!(!(ni != nj));
+ assert!(ni <= nj);
+ assert!(ni >= nj);
+ assert!(!(ni < nj));
+ assert!(!(ni > nj));
+ } else {
+ assert_eq!(ni.cmp(nj), Less);
+ assert_eq!(nj.cmp(ni), Greater);
+
+ assert!(!(ni == nj));
+ assert!(ni != nj);
+
+ assert!(ni <= nj);
+ assert!(!(ni >= nj));
+ assert!(ni < nj);
+ assert!(!(ni > nj));
+
+ assert!(!(nj <= ni));
+ assert!(nj >= ni);
+ assert!(!(nj < ni));
+ assert!(nj > ni);
+ }
+ }
+ }
+}
+
+fn hash<T: Hash>(x: &T) -> u64 {
+ let mut hasher = <RandomState as BuildHasher>::Hasher::new();
+ x.hash(&mut hasher);
+ hasher.finish()
+}
+
+#[test]
+fn test_hash() {
+ let a = BigInt::new(NoSign, vec![]);
+ let b = BigInt::new(NoSign, vec![0]);
+ let c = BigInt::new(Plus, vec![1]);
+ let d = BigInt::new(Plus, vec![1, 0, 0, 0, 0, 0]);
+ let e = BigInt::new(Plus, vec![0, 0, 0, 0, 0, 1]);
+ let f = BigInt::new(Minus, vec![1]);
+ assert!(hash(&a) == hash(&b));
+ assert!(hash(&b) != hash(&c));
+ assert!(hash(&c) == hash(&d));
+ assert!(hash(&d) != hash(&e));
+ assert!(hash(&c) != hash(&f));
+}
+
+#[test]
+fn test_convert_i64() {
+ fn check(b1: BigInt, i: i64) {
+ let b2: BigInt = FromPrimitive::from_i64(i).unwrap();
+ assert!(b1 == b2);
+ assert!(b1.to_i64().unwrap() == i);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(i64::MIN.to_bigint().unwrap(), i64::MIN);
+ check(i64::MAX.to_bigint().unwrap(), i64::MAX);
+
+ assert_eq!((i64::MAX as u64 + 1).to_bigint().unwrap().to_i64(), None);
+
+ assert_eq!(
+ BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3, 4, 5])).to_i64(),
+ None
+ );
+
+ assert_eq!(
+ BigInt::from_biguint(Minus, BigUint::new(vec![1, 0, 0, 1 << 31])).to_i64(),
+ None
+ );
+
+ assert_eq!(
+ BigInt::from_biguint(Minus, BigUint::new(vec![1, 2, 3, 4, 5])).to_i64(),
+ None
+ );
+}
+
+#[test]
+fn test_convert_i128() {
+ fn check(b1: BigInt, i: i128) {
+ let b2: BigInt = FromPrimitive::from_i128(i).unwrap();
+ assert!(b1 == b2);
+ assert!(b1.to_i128().unwrap() == i);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(i128::MIN.to_bigint().unwrap(), i128::MIN);
+ check(i128::MAX.to_bigint().unwrap(), i128::MAX);
+
+ assert_eq!((i128::MAX as u128 + 1).to_bigint().unwrap().to_i128(), None);
+
+ assert_eq!(
+ BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3, 4, 5])).to_i128(),
+ None
+ );
+
+ assert_eq!(
+ BigInt::from_biguint(Minus, BigUint::new(vec![1, 0, 0, 1 << 31])).to_i128(),
+ None
+ );
+
+ assert_eq!(
+ BigInt::from_biguint(Minus, BigUint::new(vec![1, 2, 3, 4, 5])).to_i128(),
+ None
+ );
+}
+
+#[test]
+fn test_convert_u64() {
+ fn check(b1: BigInt, u: u64) {
+ let b2: BigInt = FromPrimitive::from_u64(u).unwrap();
+ assert!(b1 == b2);
+ assert!(b1.to_u64().unwrap() == u);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(u64::MIN.to_bigint().unwrap(), u64::MIN);
+ check(u64::MAX.to_bigint().unwrap(), u64::MAX);
+
+ assert_eq!(
+ BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3, 4, 5])).to_u64(),
+ None
+ );
+
+ let max_value: BigUint = FromPrimitive::from_u64(u64::MAX).unwrap();
+ assert_eq!(BigInt::from_biguint(Minus, max_value).to_u64(), None);
+ assert_eq!(
+ BigInt::from_biguint(Minus, BigUint::new(vec![1, 2, 3, 4, 5])).to_u64(),
+ None
+ );
+}
+
+#[test]
+fn test_convert_u128() {
+ fn check(b1: BigInt, u: u128) {
+ let b2: BigInt = FromPrimitive::from_u128(u).unwrap();
+ assert!(b1 == b2);
+ assert!(b1.to_u128().unwrap() == u);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(u128::MIN.to_bigint().unwrap(), u128::MIN);
+ check(u128::MAX.to_bigint().unwrap(), u128::MAX);
+
+ assert_eq!(
+ BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3, 4, 5])).to_u128(),
+ None
+ );
+
+ let max_value: BigUint = FromPrimitive::from_u128(u128::MAX).unwrap();
+ assert_eq!(BigInt::from_biguint(Minus, max_value).to_u128(), None);
+ assert_eq!(
+ BigInt::from_biguint(Minus, BigUint::new(vec![1, 2, 3, 4, 5])).to_u128(),
+ None
+ );
+}
+
+#[test]
+#[allow(clippy::float_cmp)]
+fn test_convert_f32() {
+ fn check(b1: &BigInt, f: f32) {
+ let b2 = BigInt::from_f32(f).unwrap();
+ assert_eq!(b1, &b2);
+ assert_eq!(b1.to_f32().unwrap(), f);
+ let neg_b1 = -b1;
+ let neg_b2 = BigInt::from_f32(-f).unwrap();
+ assert_eq!(neg_b1, neg_b2);
+ assert_eq!(neg_b1.to_f32().unwrap(), -f);
+ }
+
+ check(&BigInt::zero(), 0.0);
+ check(&BigInt::one(), 1.0);
+ check(&BigInt::from(u16::MAX), pow(2.0_f32, 16) - 1.0);
+ check(&BigInt::from(1u64 << 32), pow(2.0_f32, 32));
+ check(&BigInt::from_slice(Plus, &[0, 0, 1]), pow(2.0_f32, 64));
+ check(
+ &((BigInt::one() << 100) + (BigInt::one() << 123)),
+ pow(2.0_f32, 100) + pow(2.0_f32, 123),
+ );
+ check(&(BigInt::one() << 127), pow(2.0_f32, 127));
+ check(&(BigInt::from((1u64 << 24) - 1) << (128 - 24)), f32::MAX);
+
+ // keeping all 24 digits with the bits at different offsets to the BigDigits
+ let x: u32 = 0b00000000101111011111011011011101;
+ let mut f = x as f32;
+ let mut b = BigInt::from(x);
+ for _ in 0..64 {
+ check(&b, f);
+ f *= 2.0;
+ b <<= 1;
+ }
+
+ // this number when rounded to f64 then f32 isn't the same as when rounded straight to f32
+ let mut n: i64 = 0b0000000000111111111111111111111111011111111111111111111111111111;
+ assert!((n as f64) as f32 != n as f32);
+ assert_eq!(BigInt::from(n).to_f32(), Some(n as f32));
+ n = -n;
+ assert!((n as f64) as f32 != n as f32);
+ assert_eq!(BigInt::from(n).to_f32(), Some(n as f32));
+
+ // test rounding up with the bits at different offsets to the BigDigits
+ let mut f = ((1u64 << 25) - 1) as f32;
+ let mut b = BigInt::from(1u64 << 25);
+ for _ in 0..64 {
+ assert_eq!(b.to_f32(), Some(f));
+ f *= 2.0;
+ b <<= 1;
+ }
+
+ // rounding
+ assert_eq!(
+ BigInt::from_f32(-f32::consts::PI),
+ Some(BigInt::from(-3i32))
+ );
+ assert_eq!(BigInt::from_f32(-f32::consts::E), Some(BigInt::from(-2i32)));
+ assert_eq!(BigInt::from_f32(-0.99999), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f32(-0.5), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f32(-0.0), Some(BigInt::zero()));
+ assert_eq!(
+ BigInt::from_f32(f32::MIN_POSITIVE / 2.0),
+ Some(BigInt::zero())
+ );
+ assert_eq!(BigInt::from_f32(f32::MIN_POSITIVE), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f32(0.5), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f32(0.99999), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f32(f32::consts::E), Some(BigInt::from(2u32)));
+ assert_eq!(BigInt::from_f32(f32::consts::PI), Some(BigInt::from(3u32)));
+
+ // special float values
+ assert_eq!(BigInt::from_f32(f32::NAN), None);
+ assert_eq!(BigInt::from_f32(f32::INFINITY), None);
+ assert_eq!(BigInt::from_f32(f32::NEG_INFINITY), None);
+
+ // largest BigInt that will round to a finite f32 value
+ let big_num = (BigInt::one() << 128u8) - 1u8 - (BigInt::one() << (128u8 - 25));
+ assert_eq!(big_num.to_f32(), Some(f32::MAX));
+ assert_eq!((&big_num + 1u8).to_f32(), Some(f32::INFINITY));
+ assert_eq!((-&big_num).to_f32(), Some(f32::MIN));
+ assert_eq!(((-&big_num) - 1u8).to_f32(), Some(f32::NEG_INFINITY));
+
+ assert_eq!(
+ ((BigInt::one() << 128u8) - 1u8).to_f32(),
+ Some(f32::INFINITY)
+ );
+ assert_eq!((BigInt::one() << 128u8).to_f32(), Some(f32::INFINITY));
+ assert_eq!(
+ (-((BigInt::one() << 128u8) - 1u8)).to_f32(),
+ Some(f32::NEG_INFINITY)
+ );
+ assert_eq!(
+ (-(BigInt::one() << 128u8)).to_f32(),
+ Some(f32::NEG_INFINITY)
+ );
+}
+
+#[test]
+#[allow(clippy::float_cmp)]
+fn test_convert_f64() {
+ fn check(b1: &BigInt, f: f64) {
+ let b2 = BigInt::from_f64(f).unwrap();
+ assert_eq!(b1, &b2);
+ assert_eq!(b1.to_f64().unwrap(), f);
+ let neg_b1 = -b1;
+ let neg_b2 = BigInt::from_f64(-f).unwrap();
+ assert_eq!(neg_b1, neg_b2);
+ assert_eq!(neg_b1.to_f64().unwrap(), -f);
+ }
+
+ check(&BigInt::zero(), 0.0);
+ check(&BigInt::one(), 1.0);
+ check(&BigInt::from(u32::MAX), pow(2.0_f64, 32) - 1.0);
+ check(&BigInt::from(1u64 << 32), pow(2.0_f64, 32));
+ check(&BigInt::from_slice(Plus, &[0, 0, 1]), pow(2.0_f64, 64));
+ check(
+ &((BigInt::one() << 100) + (BigInt::one() << 152)),
+ pow(2.0_f64, 100) + pow(2.0_f64, 152),
+ );
+ check(&(BigInt::one() << 1023), pow(2.0_f64, 1023));
+ check(&(BigInt::from((1u64 << 53) - 1) << (1024 - 53)), f64::MAX);
+
+ // keeping all 53 digits with the bits at different offsets to the BigDigits
+ let x: u64 = 0b0000000000011110111110110111111101110111101111011111011011011101;
+ let mut f = x as f64;
+ let mut b = BigInt::from(x);
+ for _ in 0..128 {
+ check(&b, f);
+ f *= 2.0;
+ b <<= 1;
+ }
+
+ // test rounding up with the bits at different offsets to the BigDigits
+ let mut f = ((1u64 << 54) - 1) as f64;
+ let mut b = BigInt::from(1u64 << 54);
+ for _ in 0..128 {
+ assert_eq!(b.to_f64(), Some(f));
+ f *= 2.0;
+ b <<= 1;
+ }
+
+ // rounding
+ assert_eq!(
+ BigInt::from_f64(-f64::consts::PI),
+ Some(BigInt::from(-3i32))
+ );
+ assert_eq!(BigInt::from_f64(-f64::consts::E), Some(BigInt::from(-2i32)));
+ assert_eq!(BigInt::from_f64(-0.99999), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f64(-0.5), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f64(-0.0), Some(BigInt::zero()));
+ assert_eq!(
+ BigInt::from_f64(f64::MIN_POSITIVE / 2.0),
+ Some(BigInt::zero())
+ );
+ assert_eq!(BigInt::from_f64(f64::MIN_POSITIVE), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f64(0.5), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f64(0.99999), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f64(f64::consts::E), Some(BigInt::from(2u32)));
+ assert_eq!(BigInt::from_f64(f64::consts::PI), Some(BigInt::from(3u32)));
+
+ // special float values
+ assert_eq!(BigInt::from_f64(f64::NAN), None);
+ assert_eq!(BigInt::from_f64(f64::INFINITY), None);
+ assert_eq!(BigInt::from_f64(f64::NEG_INFINITY), None);
+
+ // largest BigInt that will round to a finite f64 value
+ let big_num = (BigInt::one() << 1024u16) - 1u8 - (BigInt::one() << (1024u16 - 54));
+ assert_eq!(big_num.to_f64(), Some(f64::MAX));
+ assert_eq!((&big_num + 1u8).to_f64(), Some(f64::INFINITY));
+ assert_eq!((-&big_num).to_f64(), Some(f64::MIN));
+ assert_eq!(((-&big_num) - 1u8).to_f64(), Some(f64::NEG_INFINITY));
+
+ assert_eq!(
+ ((BigInt::one() << 1024u16) - 1u8).to_f64(),
+ Some(f64::INFINITY)
+ );
+ assert_eq!((BigInt::one() << 1024u16).to_f64(), Some(f64::INFINITY));
+ assert_eq!(
+ (-((BigInt::one() << 1024u16) - 1u8)).to_f64(),
+ Some(f64::NEG_INFINITY)
+ );
+ assert_eq!(
+ (-(BigInt::one() << 1024u16)).to_f64(),
+ Some(f64::NEG_INFINITY)
+ );
+}
+
+#[test]
+fn test_convert_to_biguint() {
+ fn check(n: BigInt, ans_1: BigUint) {
+ assert_eq!(n.to_biguint().unwrap(), ans_1);
+ assert_eq!(n.to_biguint().unwrap().to_bigint().unwrap(), n);
+ }
+ let zero: BigInt = Zero::zero();
+ let unsigned_zero: BigUint = Zero::zero();
+ let positive = BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3]));
+ let negative = -&positive;
+
+ check(zero, unsigned_zero);
+ check(positive, BigUint::new(vec![1, 2, 3]));
+
+ assert_eq!(negative.to_biguint(), None);
+}
+
+#[test]
+fn test_convert_from_uint() {
+ macro_rules! check {
+ ($ty:ident, $max:expr) => {
+ assert_eq!(BigInt::from($ty::zero()), BigInt::zero());
+ assert_eq!(BigInt::from($ty::one()), BigInt::one());
+ assert_eq!(BigInt::from($ty::MAX - $ty::one()), $max - BigInt::one());
+ assert_eq!(BigInt::from($ty::MAX), $max);
+ };
+ }
+
+ check!(u8, BigInt::from_slice(Plus, &[u8::MAX as u32]));
+ check!(u16, BigInt::from_slice(Plus, &[u16::MAX as u32]));
+ check!(u32, BigInt::from_slice(Plus, &[u32::MAX]));
+ check!(u64, BigInt::from_slice(Plus, &[u32::MAX, u32::MAX]));
+ check!(
+ u128,
+ BigInt::from_slice(Plus, &[u32::MAX, u32::MAX, u32::MAX, u32::MAX])
+ );
+ check!(usize, BigInt::from(usize::MAX as u64));
+}
+
+#[test]
+fn test_convert_from_int() {
+ macro_rules! check {
+ ($ty:ident, $min:expr, $max:expr) => {
+ assert_eq!(BigInt::from($ty::MIN), $min);
+ assert_eq!(BigInt::from($ty::MIN + $ty::one()), $min + BigInt::one());
+ assert_eq!(BigInt::from(-$ty::one()), -BigInt::one());
+ assert_eq!(BigInt::from($ty::zero()), BigInt::zero());
+ assert_eq!(BigInt::from($ty::one()), BigInt::one());
+ assert_eq!(BigInt::from($ty::MAX - $ty::one()), $max - BigInt::one());
+ assert_eq!(BigInt::from($ty::MAX), $max);
+ };
+ }
+
+ check!(
+ i8,
+ BigInt::from_slice(Minus, &[1 << 7]),
+ BigInt::from_slice(Plus, &[i8::MAX as u32])
+ );
+ check!(
+ i16,
+ BigInt::from_slice(Minus, &[1 << 15]),
+ BigInt::from_slice(Plus, &[i16::MAX as u32])
+ );
+ check!(
+ i32,
+ BigInt::from_slice(Minus, &[1 << 31]),
+ BigInt::from_slice(Plus, &[i32::MAX as u32])
+ );
+ check!(
+ i64,
+ BigInt::from_slice(Minus, &[0, 1 << 31]),
+ BigInt::from_slice(Plus, &[u32::MAX, i32::MAX as u32])
+ );
+ check!(
+ i128,
+ BigInt::from_slice(Minus, &[0, 0, 0, 1 << 31]),
+ BigInt::from_slice(Plus, &[u32::MAX, u32::MAX, u32::MAX, i32::MAX as u32])
+ );
+ check!(
+ isize,
+ BigInt::from(isize::MIN as i64),
+ BigInt::from(isize::MAX as i64)
+ );
+}
+
+#[test]
+fn test_convert_from_biguint() {
+ assert_eq!(BigInt::from(BigUint::zero()), BigInt::zero());
+ assert_eq!(BigInt::from(BigUint::one()), BigInt::one());
+ assert_eq!(
+ BigInt::from(BigUint::from_slice(&[1, 2, 3])),
+ BigInt::from_slice(Plus, &[1, 2, 3])
+ );
+}
+
+#[test]
+fn test_add() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let (na, nb, nc) = (-&a, -&b, -&c);
+
+ assert_op!(a + b == c);
+ assert_op!(b + a == c);
+ assert_op!(c + na == b);
+ assert_op!(c + nb == a);
+ assert_op!(a + nc == nb);
+ assert_op!(b + nc == na);
+ assert_op!(na + nb == nc);
+ assert_op!(a + na == BigInt::zero());
+
+ assert_assign_op!(a += b == c);
+ assert_assign_op!(b += a == c);
+ assert_assign_op!(c += na == b);
+ assert_assign_op!(c += nb == a);
+ assert_assign_op!(a += nc == nb);
+ assert_assign_op!(b += nc == na);
+ assert_assign_op!(na += nb == nc);
+ assert_assign_op!(a += na == BigInt::zero());
+ }
+}
+
+#[test]
+fn test_sub() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let (na, nb, nc) = (-&a, -&b, -&c);
+
+ assert_op!(c - a == b);
+ assert_op!(c - b == a);
+ assert_op!(nb - a == nc);
+ assert_op!(na - b == nc);
+ assert_op!(b - na == c);
+ assert_op!(a - nb == c);
+ assert_op!(nc - na == nb);
+ assert_op!(a - a == BigInt::zero());
+
+ assert_assign_op!(c -= a == b);
+ assert_assign_op!(c -= b == a);
+ assert_assign_op!(nb -= a == nc);
+ assert_assign_op!(na -= b == nc);
+ assert_assign_op!(b -= na == c);
+ assert_assign_op!(a -= nb == c);
+ assert_assign_op!(nc -= na == nb);
+ assert_assign_op!(a -= a == BigInt::zero());
+ }
+}
+
+#[test]
+fn test_mul() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let (na, nb, nc) = (-&a, -&b, -&c);
+
+ assert_op!(a * b == c);
+ assert_op!(b * a == c);
+ assert_op!(na * nb == c);
+
+ assert_op!(na * b == nc);
+ assert_op!(nb * a == nc);
+
+ assert_assign_op!(a *= b == c);
+ assert_assign_op!(b *= a == c);
+ assert_assign_op!(na *= nb == c);
+
+ assert_assign_op!(na *= b == nc);
+ assert_assign_op!(nb *= a == nc);
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let d = BigInt::from_slice(Plus, d_vec);
+
+ assert!(a == &b * &c + &d);
+ assert!(a == &c * &b + &d);
+ }
+}
+
+#[test]
+fn test_div_mod_floor() {
+ fn check_sub(a: &BigInt, b: &BigInt, ans_d: &BigInt, ans_m: &BigInt) {
+ let (d, m) = a.div_mod_floor(b);
+ assert_eq!(d, a.div_floor(b));
+ assert_eq!(m, a.mod_floor(b));
+ if !m.is_zero() {
+ assert_eq!(m.sign(), b.sign());
+ }
+ assert!(m.abs() <= b.abs());
+ assert!(*a == b * &d + &m);
+ assert!(d == *ans_d);
+ assert!(m == *ans_m);
+ }
+
+ fn check(a: &BigInt, b: &BigInt, d: &BigInt, m: &BigInt) {
+ if m.is_zero() {
+ check_sub(a, b, d, m);
+ check_sub(a, &b.neg(), &d.neg(), m);
+ check_sub(&a.neg(), b, &d.neg(), m);
+ check_sub(&a.neg(), &b.neg(), d, m);
+ } else {
+ let one: BigInt = One::one();
+ check_sub(a, b, d, m);
+ check_sub(a, &b.neg(), &(d.neg() - &one), &(m - b));
+ check_sub(&a.neg(), b, &(d.neg() - &one), &(b - m));
+ check_sub(&a.neg(), &b.neg(), d, &m.neg());
+ }
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ if !a.is_zero() {
+ check(&c, &a, &b, &Zero::zero());
+ }
+ if !b.is_zero() {
+ check(&c, &b, &a, &Zero::zero());
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let d = BigInt::from_slice(Plus, d_vec);
+
+ if !b.is_zero() {
+ check(&a, &b, &c, &d);
+ }
+ }
+}
+
+#[test]
+fn test_div_rem() {
+ fn check_sub(a: &BigInt, b: &BigInt, ans_q: &BigInt, ans_r: &BigInt) {
+ let (q, r) = a.div_rem(b);
+ if !r.is_zero() {
+ assert_eq!(r.sign(), a.sign());
+ }
+ assert!(r.abs() <= b.abs());
+ assert!(*a == b * &q + &r);
+ assert!(q == *ans_q);
+ assert!(r == *ans_r);
+
+ let (a, b, ans_q, ans_r) = (a.clone(), b.clone(), ans_q.clone(), ans_r.clone());
+ assert_op!(a / b == ans_q);
+ assert_op!(a % b == ans_r);
+ assert_assign_op!(a /= b == ans_q);
+ assert_assign_op!(a %= b == ans_r);
+ }
+
+ fn check(a: &BigInt, b: &BigInt, q: &BigInt, r: &BigInt) {
+ check_sub(a, b, q, r);
+ check_sub(a, &b.neg(), &q.neg(), r);
+ check_sub(&a.neg(), b, &q.neg(), &r.neg());
+ check_sub(&a.neg(), &b.neg(), q, &r.neg());
+ }
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ if !a.is_zero() {
+ check(&c, &a, &b, &Zero::zero());
+ }
+ if !b.is_zero() {
+ check(&c, &b, &a, &Zero::zero());
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let d = BigInt::from_slice(Plus, d_vec);
+
+ if !b.is_zero() {
+ check(&a, &b, &c, &d);
+ }
+ }
+}
+
+#[test]
+fn test_div_ceil() {
+ fn check_sub(a: &BigInt, b: &BigInt, ans_d: &BigInt) {
+ assert_eq!(a.div_ceil(b), *ans_d);
+ }
+
+ fn check(a: &BigInt, b: &BigInt, d: &BigInt, m: &BigInt) {
+ if m.is_zero() {
+ check_sub(a, b, d);
+ check_sub(a, &b.neg(), &d.neg());
+ check_sub(&a.neg(), b, &d.neg());
+ check_sub(&a.neg(), &b.neg(), d);
+ } else {
+ check_sub(a, b, &(d + 1));
+ check_sub(a, &b.neg(), &d.neg());
+ check_sub(&a.neg(), b, &d.neg());
+ check_sub(&a.neg(), &b.neg(), &(d + 1));
+ }
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ if !a.is_zero() {
+ check(&c, &a, &b, &Zero::zero());
+ }
+ if !b.is_zero() {
+ check(&c, &b, &a, &Zero::zero());
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let d = BigInt::from_slice(Plus, d_vec);
+
+ if !b.is_zero() {
+ check(&a, &b, &c, &d);
+ }
+ }
+}
+
+#[test]
+fn test_checked_add() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ assert!(a.checked_add(&b).unwrap() == c);
+ assert!(b.checked_add(&a).unwrap() == c);
+ assert!(c.checked_add(&(-&a)).unwrap() == b);
+ assert!(c.checked_add(&(-&b)).unwrap() == a);
+ assert!(a.checked_add(&(-&c)).unwrap() == (-&b));
+ assert!(b.checked_add(&(-&c)).unwrap() == (-&a));
+ assert!((-&a).checked_add(&(-&b)).unwrap() == (-&c));
+ assert!(a.checked_add(&(-&a)).unwrap() == BigInt::zero());
+ }
+}
+
+#[test]
+fn test_checked_sub() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ assert!(c.checked_sub(&a).unwrap() == b);
+ assert!(c.checked_sub(&b).unwrap() == a);
+ assert!((-&b).checked_sub(&a).unwrap() == (-&c));
+ assert!((-&a).checked_sub(&b).unwrap() == (-&c));
+ assert!(b.checked_sub(&(-&a)).unwrap() == c);
+ assert!(a.checked_sub(&(-&b)).unwrap() == c);
+ assert!((-&c).checked_sub(&(-&a)).unwrap() == (-&b));
+ assert!(a.checked_sub(&a).unwrap() == BigInt::zero());
+ }
+}
+
+#[test]
+fn test_checked_mul() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ assert!(a.checked_mul(&b).unwrap() == c);
+ assert!(b.checked_mul(&a).unwrap() == c);
+
+ assert!((-&a).checked_mul(&b).unwrap() == -&c);
+ assert!((-&b).checked_mul(&a).unwrap() == -&c);
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let d = BigInt::from_slice(Plus, d_vec);
+
+ assert!(a == b.checked_mul(&c).unwrap() + &d);
+ assert!(a == c.checked_mul(&b).unwrap() + &d);
+ }
+}
+#[test]
+fn test_checked_div() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ if !a.is_zero() {
+ assert!(c.checked_div(&a).unwrap() == b);
+ assert!((-&c).checked_div(&(-&a)).unwrap() == b);
+ assert!((-&c).checked_div(&a).unwrap() == -&b);
+ }
+ if !b.is_zero() {
+ assert!(c.checked_div(&b).unwrap() == a);
+ assert!((-&c).checked_div(&(-&b)).unwrap() == a);
+ assert!((-&c).checked_div(&b).unwrap() == -&a);
+ }
+
+ assert!(c.checked_div(&Zero::zero()).is_none());
+ assert!((-&c).checked_div(&Zero::zero()).is_none());
+ }
+}
+
+#[test]
+fn test_gcd() {
+ fn check(a: isize, b: isize, c: isize) {
+ let big_a: BigInt = FromPrimitive::from_isize(a).unwrap();
+ let big_b: BigInt = FromPrimitive::from_isize(b).unwrap();
+ let big_c: BigInt = FromPrimitive::from_isize(c).unwrap();
+
+ assert_eq!(big_a.gcd(&big_b), big_c);
+ assert_eq!(big_a.extended_gcd(&big_b).gcd, big_c);
+ assert_eq!(big_a.gcd_lcm(&big_b).0, big_c);
+ assert_eq!(big_a.extended_gcd_lcm(&big_b).0.gcd, big_c);
+ }
+
+ check(10, 2, 2);
+ check(10, 3, 1);
+ check(0, 3, 3);
+ check(3, 3, 3);
+ check(56, 42, 14);
+ check(3, -3, 3);
+ check(-6, 3, 3);
+ check(-4, -2, 2);
+}
+
+#[test]
+fn test_lcm() {
+ fn check(a: isize, b: isize, c: isize) {
+ let big_a: BigInt = FromPrimitive::from_isize(a).unwrap();
+ let big_b: BigInt = FromPrimitive::from_isize(b).unwrap();
+ let big_c: BigInt = FromPrimitive::from_isize(c).unwrap();
+
+ assert_eq!(big_a.lcm(&big_b), big_c);
+ assert_eq!(big_a.gcd_lcm(&big_b).1, big_c);
+ assert_eq!(big_a.extended_gcd_lcm(&big_b).1, big_c);
+ }
+
+ check(0, 0, 0);
+ check(1, 0, 0);
+ check(0, 1, 0);
+ check(1, 1, 1);
+ check(-1, 1, 1);
+ check(1, -1, 1);
+ check(-1, -1, 1);
+ check(8, 9, 72);
+ check(11, 5, 55);
+}
+
+#[test]
+fn test_next_multiple_of() {
+ assert_eq!(
+ BigInt::from(16).next_multiple_of(&BigInt::from(8)),
+ BigInt::from(16)
+ );
+ assert_eq!(
+ BigInt::from(23).next_multiple_of(&BigInt::from(8)),
+ BigInt::from(24)
+ );
+ assert_eq!(
+ BigInt::from(16).next_multiple_of(&BigInt::from(-8)),
+ BigInt::from(16)
+ );
+ assert_eq!(
+ BigInt::from(23).next_multiple_of(&BigInt::from(-8)),
+ BigInt::from(16)
+ );
+ assert_eq!(
+ BigInt::from(-16).next_multiple_of(&BigInt::from(8)),
+ BigInt::from(-16)
+ );
+ assert_eq!(
+ BigInt::from(-23).next_multiple_of(&BigInt::from(8)),
+ BigInt::from(-16)
+ );
+ assert_eq!(
+ BigInt::from(-16).next_multiple_of(&BigInt::from(-8)),
+ BigInt::from(-16)
+ );
+ assert_eq!(
+ BigInt::from(-23).next_multiple_of(&BigInt::from(-8)),
+ BigInt::from(-24)
+ );
+}
+
+#[test]
+fn test_prev_multiple_of() {
+ assert_eq!(
+ BigInt::from(16).prev_multiple_of(&BigInt::from(8)),
+ BigInt::from(16)
+ );
+ assert_eq!(
+ BigInt::from(23).prev_multiple_of(&BigInt::from(8)),
+ BigInt::from(16)
+ );
+ assert_eq!(
+ BigInt::from(16).prev_multiple_of(&BigInt::from(-8)),
+ BigInt::from(16)
+ );
+ assert_eq!(
+ BigInt::from(23).prev_multiple_of(&BigInt::from(-8)),
+ BigInt::from(24)
+ );
+ assert_eq!(
+ BigInt::from(-16).prev_multiple_of(&BigInt::from(8)),
+ BigInt::from(-16)
+ );
+ assert_eq!(
+ BigInt::from(-23).prev_multiple_of(&BigInt::from(8)),
+ BigInt::from(-24)
+ );
+ assert_eq!(
+ BigInt::from(-16).prev_multiple_of(&BigInt::from(-8)),
+ BigInt::from(-16)
+ );
+ assert_eq!(
+ BigInt::from(-23).prev_multiple_of(&BigInt::from(-8)),
+ BigInt::from(-16)
+ );
+}
+
+#[test]
+fn test_abs_sub() {
+ let zero: BigInt = Zero::zero();
+ let one: BigInt = One::one();
+ assert_eq!((-&one).abs_sub(&one), zero);
+ let one: BigInt = One::one();
+ let zero: BigInt = Zero::zero();
+ assert_eq!(one.abs_sub(&one), zero);
+ let one: BigInt = One::one();
+ let zero: BigInt = Zero::zero();
+ assert_eq!(one.abs_sub(&zero), one);
+ let one: BigInt = One::one();
+ let two: BigInt = FromPrimitive::from_isize(2).unwrap();
+ assert_eq!(one.abs_sub(&-&one), two);
+}
+
+#[test]
+fn test_from_str_radix() {
+ fn check(s: &str, ans: Option<isize>) {
+ let ans = ans.map(|n| {
+ let x: BigInt = FromPrimitive::from_isize(n).unwrap();
+ x
+ });
+ assert_eq!(BigInt::from_str_radix(s, 10).ok(), ans);
+ }
+ check("10", Some(10));
+ check("1", Some(1));
+ check("0", Some(0));
+ check("-1", Some(-1));
+ check("-10", Some(-10));
+ check("+10", Some(10));
+ check("--7", None);
+ check("++5", None);
+ check("+-9", None);
+ check("-+3", None);
+ check("Z", None);
+ check("_", None);
+
+ // Issue 10522: this hit an edge case that caused it to
+ // attempt to allocate a vector of size (-1u) == huge.
+ let x: BigInt = format!("1{}", repeat("0").take(36).collect::<String>())
+ .parse()
+ .unwrap();
+ let _y = x.to_string();
+}
+
+#[test]
+fn test_lower_hex() {
+ let a = BigInt::parse_bytes(b"A", 16).unwrap();
+ let hello = BigInt::parse_bytes(b"-22405534230753963835153736737", 10).unwrap();
+
+ assert_eq!(format!("{:x}", a), "a");
+ assert_eq!(format!("{:x}", hello), "-48656c6c6f20776f726c6421");
+ assert_eq!(format!("{:♥>+#8x}", a), "♥♥♥♥+0xa");
+}
+
+#[test]
+fn test_upper_hex() {
+ let a = BigInt::parse_bytes(b"A", 16).unwrap();
+ let hello = BigInt::parse_bytes(b"-22405534230753963835153736737", 10).unwrap();
+
+ assert_eq!(format!("{:X}", a), "A");
+ assert_eq!(format!("{:X}", hello), "-48656C6C6F20776F726C6421");
+ assert_eq!(format!("{:♥>+#8X}", a), "♥♥♥♥+0xA");
+}
+
+#[test]
+fn test_binary() {
+ let a = BigInt::parse_bytes(b"A", 16).unwrap();
+ let hello = BigInt::parse_bytes(b"-224055342307539", 10).unwrap();
+
+ assert_eq!(format!("{:b}", a), "1010");
+ assert_eq!(
+ format!("{:b}", hello),
+ "-110010111100011011110011000101101001100011010011"
+ );
+ assert_eq!(format!("{:♥>+#8b}", a), "♥+0b1010");
+}
+
+#[test]
+fn test_octal() {
+ let a = BigInt::parse_bytes(b"A", 16).unwrap();
+ let hello = BigInt::parse_bytes(b"-22405534230753963835153736737", 10).unwrap();
+
+ assert_eq!(format!("{:o}", a), "12");
+ assert_eq!(format!("{:o}", hello), "-22062554330674403566756233062041");
+ assert_eq!(format!("{:♥>+#8o}", a), "♥♥♥+0o12");
+}
+
+#[test]
+fn test_display() {
+ let a = BigInt::parse_bytes(b"A", 16).unwrap();
+ let hello = BigInt::parse_bytes(b"-22405534230753963835153736737", 10).unwrap();
+
+ assert_eq!(format!("{}", a), "10");
+ assert_eq!(format!("{}", hello), "-22405534230753963835153736737");
+ assert_eq!(format!("{:♥>+#8}", a), "♥♥♥♥♥+10");
+}
+
+#[test]
+fn test_neg() {
+ assert!(-BigInt::new(Plus, vec![1, 1, 1]) == BigInt::new(Minus, vec![1, 1, 1]));
+ assert!(-BigInt::new(Minus, vec![1, 1, 1]) == BigInt::new(Plus, vec![1, 1, 1]));
+ let zero: BigInt = Zero::zero();
+ assert_eq!(-&zero, zero);
+}
+
+#[test]
+fn test_negative_shr() {
+ assert_eq!(BigInt::from(-1) >> 1, BigInt::from(-1));
+ assert_eq!(BigInt::from(-2) >> 1, BigInt::from(-1));
+ assert_eq!(BigInt::from(-3) >> 1, BigInt::from(-2));
+ assert_eq!(BigInt::from(-3) >> 2, BigInt::from(-1));
+}
+
+#[test]
+fn test_iter_sum() {
+ let result: BigInt = FromPrimitive::from_isize(-1234567).unwrap();
+ let data: Vec<BigInt> = vec![
+ FromPrimitive::from_i32(-1000000).unwrap(),
+ FromPrimitive::from_i32(-200000).unwrap(),
+ FromPrimitive::from_i32(-30000).unwrap(),
+ FromPrimitive::from_i32(-4000).unwrap(),
+ FromPrimitive::from_i32(-500).unwrap(),
+ FromPrimitive::from_i32(-60).unwrap(),
+ FromPrimitive::from_i32(-7).unwrap(),
+ ];
+
+ assert_eq!(result, data.iter().sum::<BigInt>());
+ assert_eq!(result, data.into_iter().sum::<BigInt>());
+}
+
+#[test]
+fn test_iter_product() {
+ let data: Vec<BigInt> = vec![
+ FromPrimitive::from_i32(1001).unwrap(),
+ FromPrimitive::from_i32(-1002).unwrap(),
+ FromPrimitive::from_i32(1003).unwrap(),
+ FromPrimitive::from_i32(-1004).unwrap(),
+ FromPrimitive::from_i32(1005).unwrap(),
+ ];
+ let result = data.get(0).unwrap()
+ * data.get(1).unwrap()
+ * data.get(2).unwrap()
+ * data.get(3).unwrap()
+ * data.get(4).unwrap();
+
+ assert_eq!(result, data.iter().product::<BigInt>());
+ assert_eq!(result, data.into_iter().product::<BigInt>());
+}
+
+#[test]
+fn test_iter_sum_generic() {
+ let result: BigInt = FromPrimitive::from_isize(-1234567).unwrap();
+ let data = vec![-1000000, -200000, -30000, -4000, -500, -60, -7];
+
+ assert_eq!(result, data.iter().sum::<BigInt>());
+ assert_eq!(result, data.into_iter().sum::<BigInt>());
+}
+
+#[test]
+fn test_iter_product_generic() {
+ let data = vec![1001, -1002, 1003, -1004, 1005];
+ let result = data[0].to_bigint().unwrap()
+ * data[1].to_bigint().unwrap()
+ * data[2].to_bigint().unwrap()
+ * data[3].to_bigint().unwrap()
+ * data[4].to_bigint().unwrap();
+
+ assert_eq!(result, data.iter().product::<BigInt>());
+ assert_eq!(result, data.into_iter().product::<BigInt>());
+}
+
+#[test]
+fn test_pow() {
+ let one = BigInt::from(1i32);
+ let two = BigInt::from(2i32);
+ let four = BigInt::from(4i32);
+ let eight = BigInt::from(8i32);
+ let minus_two = BigInt::from(-2i32);
+ macro_rules! check {
+ ($t:ty) => {
+ assert_eq!(Pow::pow(&two, 0 as $t), one);
+ assert_eq!(Pow::pow(&two, 1 as $t), two);
+ assert_eq!(Pow::pow(&two, 2 as $t), four);
+ assert_eq!(Pow::pow(&two, 3 as $t), eight);
+ assert_eq!(Pow::pow(&two, &(3 as $t)), eight);
+ assert_eq!(Pow::pow(&minus_two, 0 as $t), one, "-2^0");
+ assert_eq!(Pow::pow(&minus_two, 1 as $t), minus_two, "-2^1");
+ assert_eq!(Pow::pow(&minus_two, 2 as $t), four, "-2^2");
+ assert_eq!(Pow::pow(&minus_two, 3 as $t), -&eight, "-2^3");
+ };
+ }
+ check!(u8);
+ check!(u16);
+ check!(u32);
+ check!(u64);
+ check!(usize);
+}
+
+#[test]
+fn test_bit() {
+ // 12 = (1100)_2
+ assert!(!BigInt::from(0b1100u8).bit(0));
+ assert!(!BigInt::from(0b1100u8).bit(1));
+ assert!(BigInt::from(0b1100u8).bit(2));
+ assert!(BigInt::from(0b1100u8).bit(3));
+ assert!(!BigInt::from(0b1100u8).bit(4));
+ assert!(!BigInt::from(0b1100u8).bit(200));
+ assert!(!BigInt::from(0b1100u8).bit(u64::MAX));
+ // -12 = (...110100)_2
+ assert!(!BigInt::from(-12i8).bit(0));
+ assert!(!BigInt::from(-12i8).bit(1));
+ assert!(BigInt::from(-12i8).bit(2));
+ assert!(!BigInt::from(-12i8).bit(3));
+ assert!(BigInt::from(-12i8).bit(4));
+ assert!(BigInt::from(-12i8).bit(200));
+ assert!(BigInt::from(-12i8).bit(u64::MAX));
+}
+
+#[test]
+fn test_set_bit() {
+ let mut x: BigInt;
+
+ // zero
+ x = BigInt::zero();
+ x.set_bit(200, true);
+ assert_eq!(x, BigInt::one() << 200);
+ x = BigInt::zero();
+ x.set_bit(200, false);
+ assert_eq!(x, BigInt::zero());
+
+ // positive numbers
+ x = BigInt::from_biguint(Plus, BigUint::one() << 200);
+ x.set_bit(10, true);
+ x.set_bit(200, false);
+ assert_eq!(x, BigInt::one() << 10);
+ x.set_bit(10, false);
+ x.set_bit(5, false);
+ assert_eq!(x, BigInt::zero());
+
+ // negative numbers
+ x = BigInt::from(-12i8);
+ x.set_bit(200, true);
+ assert_eq!(x, BigInt::from(-12i8));
+ x.set_bit(200, false);
+ assert_eq!(
+ x,
+ BigInt::from_biguint(Minus, BigUint::from(12u8) | (BigUint::one() << 200))
+ );
+ x.set_bit(6, false);
+ assert_eq!(
+ x,
+ BigInt::from_biguint(Minus, BigUint::from(76u8) | (BigUint::one() << 200))
+ );
+ x.set_bit(6, true);
+ assert_eq!(
+ x,
+ BigInt::from_biguint(Minus, BigUint::from(12u8) | (BigUint::one() << 200))
+ );
+ x.set_bit(200, true);
+ assert_eq!(x, BigInt::from(-12i8));
+
+ x = BigInt::from_biguint(Minus, BigUint::one() << 30);
+ x.set_bit(10, true);
+ assert_eq!(
+ x,
+ BigInt::from_biguint(Minus, (BigUint::one() << 30) - (BigUint::one() << 10))
+ );
+
+ x = BigInt::from_biguint(Minus, BigUint::one() << 200);
+ x.set_bit(40, true);
+ assert_eq!(
+ x,
+ BigInt::from_biguint(Minus, (BigUint::one() << 200) - (BigUint::one() << 40))
+ );
+
+ x = BigInt::from_biguint(Minus, (BigUint::one() << 200) | (BigUint::one() << 100));
+ x.set_bit(100, false);
+ assert_eq!(
+ x,
+ BigInt::from_biguint(Minus, (BigUint::one() << 200) | (BigUint::one() << 101))
+ );
+
+ x = BigInt::from_biguint(Minus, (BigUint::one() << 63) | (BigUint::one() << 62));
+ x.set_bit(62, false);
+ assert_eq!(x, BigInt::from_biguint(Minus, BigUint::one() << 64));
+
+ x = BigInt::from_biguint(Minus, (BigUint::one() << 200) - BigUint::one());
+ x.set_bit(0, false);
+ assert_eq!(x, BigInt::from_biguint(Minus, BigUint::one() << 200));
+}
diff --git a/tests/bigint_bitwise.rs b/tests/bigint_bitwise.rs
new file mode 100644
index 0000000..6c1e82f
--- /dev/null
+++ b/tests/bigint_bitwise.rs
@@ -0,0 +1,178 @@
+use num_bigint::{BigInt, Sign, ToBigInt};
+use num_traits::ToPrimitive;
+use std::{i32, i64, u32};
+
+enum ValueVec {
+ N,
+ P(&'static [u32]),
+ M(&'static [u32]),
+}
+
+use crate::ValueVec::*;
+
+impl ToBigInt for ValueVec {
+ fn to_bigint(&self) -> Option<BigInt> {
+ match self {
+ &N => Some(BigInt::from_slice(Sign::NoSign, &[])),
+ &P(s) => Some(BigInt::from_slice(Sign::Plus, s)),
+ &M(s) => Some(BigInt::from_slice(Sign::Minus, s)),
+ }
+ }
+}
+
+// a, !a
+const NOT_VALUES: &[(ValueVec, ValueVec)] = &[
+ (N, M(&[1])),
+ (P(&[1]), M(&[2])),
+ (P(&[2]), M(&[3])),
+ (P(&[!0 - 2]), M(&[!0 - 1])),
+ (P(&[!0 - 1]), M(&[!0])),
+ (P(&[!0]), M(&[0, 1])),
+ (P(&[0, 1]), M(&[1, 1])),
+ (P(&[1, 1]), M(&[2, 1])),
+];
+
+// a, b, a & b, a | b, a ^ b
+const BITWISE_VALUES: &[(ValueVec, ValueVec, ValueVec, ValueVec, ValueVec)] = &[
+ (N, N, N, N, N),
+ (N, P(&[1]), N, P(&[1]), P(&[1])),
+ (N, P(&[!0]), N, P(&[!0]), P(&[!0])),
+ (N, P(&[0, 1]), N, P(&[0, 1]), P(&[0, 1])),
+ (N, M(&[1]), N, M(&[1]), M(&[1])),
+ (N, M(&[!0]), N, M(&[!0]), M(&[!0])),
+ (N, M(&[0, 1]), N, M(&[0, 1]), M(&[0, 1])),
+ (P(&[1]), P(&[!0]), P(&[1]), P(&[!0]), P(&[!0 - 1])),
+ (P(&[!0]), P(&[!0]), P(&[!0]), P(&[!0]), N),
+ (P(&[!0]), P(&[1, 1]), P(&[1]), P(&[!0, 1]), P(&[!0 - 1, 1])),
+ (P(&[1]), M(&[!0]), P(&[1]), M(&[!0]), M(&[0, 1])),
+ (P(&[!0]), M(&[1]), P(&[!0]), M(&[1]), M(&[0, 1])),
+ (P(&[!0]), M(&[!0]), P(&[1]), M(&[1]), M(&[2])),
+ (P(&[!0]), M(&[1, 1]), P(&[!0]), M(&[1, 1]), M(&[0, 2])),
+ (P(&[1, 1]), M(&[!0]), P(&[1, 1]), M(&[!0]), M(&[0, 2])),
+ (M(&[1]), M(&[!0]), M(&[!0]), M(&[1]), P(&[!0 - 1])),
+ (M(&[!0]), M(&[!0]), M(&[!0]), M(&[!0]), N),
+ (M(&[!0]), M(&[1, 1]), M(&[!0, 1]), M(&[1]), P(&[!0 - 1, 1])),
+];
+
+const I32_MIN: i64 = i32::MIN as i64;
+const I32_MAX: i64 = i32::MAX as i64;
+const U32_MAX: i64 = u32::MAX as i64;
+
+// some corner cases
+const I64_VALUES: &[i64] = &[
+ i64::MIN,
+ i64::MIN + 1,
+ i64::MIN + 2,
+ i64::MIN + 3,
+ -U32_MAX - 3,
+ -U32_MAX - 2,
+ -U32_MAX - 1,
+ -U32_MAX,
+ -U32_MAX + 1,
+ -U32_MAX + 2,
+ -U32_MAX + 3,
+ I32_MIN - 3,
+ I32_MIN - 2,
+ I32_MIN - 1,
+ I32_MIN,
+ I32_MIN + 1,
+ I32_MIN + 2,
+ I32_MIN + 3,
+ -3,
+ -2,
+ -1,
+ 0,
+ 1,
+ 2,
+ 3,
+ I32_MAX - 3,
+ I32_MAX - 2,
+ I32_MAX - 1,
+ I32_MAX,
+ I32_MAX + 1,
+ I32_MAX + 2,
+ I32_MAX + 3,
+ U32_MAX - 3,
+ U32_MAX - 2,
+ U32_MAX - 1,
+ U32_MAX,
+ U32_MAX + 1,
+ U32_MAX + 2,
+ U32_MAX + 3,
+ i64::MAX - 3,
+ i64::MAX - 2,
+ i64::MAX - 1,
+ i64::MAX,
+];
+
+#[test]
+fn test_not() {
+ for &(ref a, ref not) in NOT_VALUES.iter() {
+ let a = a.to_bigint().unwrap();
+ let not = not.to_bigint().unwrap();
+
+ // sanity check for tests that fit in i64
+ if let (Some(prim_a), Some(prim_not)) = (a.to_i64(), not.to_i64()) {
+ assert_eq!(!prim_a, prim_not);
+ }
+
+ assert_eq!(!a.clone(), not, "!{:x}", a);
+ assert_eq!(!not.clone(), a, "!{:x}", not);
+ }
+}
+
+#[test]
+fn test_not_i64() {
+ for &prim_a in I64_VALUES.iter() {
+ let a = prim_a.to_bigint().unwrap();
+ let not = (!prim_a).to_bigint().unwrap();
+ assert_eq!(!a.clone(), not, "!{:x}", a);
+ }
+}
+
+#[test]
+fn test_bitwise() {
+ for &(ref a, ref b, ref and, ref or, ref xor) in BITWISE_VALUES.iter() {
+ let a = a.to_bigint().unwrap();
+ let b = b.to_bigint().unwrap();
+ let and = and.to_bigint().unwrap();
+ let or = or.to_bigint().unwrap();
+ let xor = xor.to_bigint().unwrap();
+
+ // sanity check for tests that fit in i64
+ if let (Some(prim_a), Some(prim_b)) = (a.to_i64(), b.to_i64()) {
+ if let Some(prim_and) = and.to_i64() {
+ assert_eq!(prim_a & prim_b, prim_and);
+ }
+ if let Some(prim_or) = or.to_i64() {
+ assert_eq!(prim_a | prim_b, prim_or);
+ }
+ if let Some(prim_xor) = xor.to_i64() {
+ assert_eq!(prim_a ^ prim_b, prim_xor);
+ }
+ }
+
+ assert_eq!(a.clone() & &b, and, "{:x} & {:x}", a, b);
+ assert_eq!(b.clone() & &a, and, "{:x} & {:x}", b, a);
+ assert_eq!(a.clone() | &b, or, "{:x} | {:x}", a, b);
+ assert_eq!(b.clone() | &a, or, "{:x} | {:x}", b, a);
+ assert_eq!(a.clone() ^ &b, xor, "{:x} ^ {:x}", a, b);
+ assert_eq!(b.clone() ^ &a, xor, "{:x} ^ {:x}", b, a);
+ }
+}
+
+#[test]
+fn test_bitwise_i64() {
+ for &prim_a in I64_VALUES.iter() {
+ let a = prim_a.to_bigint().unwrap();
+ for &prim_b in I64_VALUES.iter() {
+ let b = prim_b.to_bigint().unwrap();
+ let and = (prim_a & prim_b).to_bigint().unwrap();
+ let or = (prim_a | prim_b).to_bigint().unwrap();
+ let xor = (prim_a ^ prim_b).to_bigint().unwrap();
+ assert_eq!(a.clone() & &b, and, "{:x} & {:x}", a, b);
+ assert_eq!(a.clone() | &b, or, "{:x} | {:x}", a, b);
+ assert_eq!(a.clone() ^ &b, xor, "{:x} ^ {:x}", a, b);
+ }
+ }
+}
diff --git a/tests/bigint_scalar.rs b/tests/bigint_scalar.rs
new file mode 100644
index 0000000..485f2c5
--- /dev/null
+++ b/tests/bigint_scalar.rs
@@ -0,0 +1,148 @@
+use num_bigint::BigInt;
+use num_bigint::Sign::Plus;
+use num_traits::{Signed, ToPrimitive, Zero};
+
+use std::ops::Neg;
+
+mod consts;
+use crate::consts::*;
+
+#[macro_use]
+mod macros;
+
+#[test]
+fn test_scalar_add() {
+ fn check(x: &BigInt, y: &BigInt, z: &BigInt) {
+ let (x, y, z) = (x.clone(), y.clone(), z.clone());
+ assert_signed_scalar_op!(x + y == z);
+ assert_signed_scalar_assign_op!(x += y == z);
+ }
+
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let (na, nb, nc) = (-&a, -&b, -&c);
+
+ check(&a, &b, &c);
+ check(&b, &a, &c);
+ check(&c, &na, &b);
+ check(&c, &nb, &a);
+ check(&a, &nc, &nb);
+ check(&b, &nc, &na);
+ check(&na, &nb, &nc);
+ check(&a, &na, &Zero::zero());
+ }
+}
+
+#[test]
+fn test_scalar_sub() {
+ fn check(x: &BigInt, y: &BigInt, z: &BigInt) {
+ let (x, y, z) = (x.clone(), y.clone(), z.clone());
+ assert_signed_scalar_op!(x - y == z);
+ assert_signed_scalar_assign_op!(x -= y == z);
+ }
+
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let (na, nb, nc) = (-&a, -&b, -&c);
+
+ check(&c, &a, &b);
+ check(&c, &b, &a);
+ check(&nb, &a, &nc);
+ check(&na, &b, &nc);
+ check(&b, &na, &c);
+ check(&a, &nb, &c);
+ check(&nc, &na, &nb);
+ check(&a, &a, &Zero::zero());
+ }
+}
+
+#[test]
+fn test_scalar_mul() {
+ fn check(x: &BigInt, y: &BigInt, z: &BigInt) {
+ let (x, y, z) = (x.clone(), y.clone(), z.clone());
+ assert_signed_scalar_op!(x * y == z);
+ assert_signed_scalar_assign_op!(x *= y == z);
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let (na, nb, nc) = (-&a, -&b, -&c);
+
+ check(&a, &b, &c);
+ check(&b, &a, &c);
+ check(&na, &nb, &c);
+
+ check(&na, &b, &nc);
+ check(&nb, &a, &nc);
+ }
+}
+
+#[test]
+fn test_scalar_div_rem() {
+ fn check_sub(a: &BigInt, b: u32, ans_q: &BigInt, ans_r: &BigInt) {
+ let (q, r) = (a / b, a % b);
+ if !r.is_zero() {
+ assert_eq!(r.sign(), a.sign());
+ }
+ assert!(r.abs() <= BigInt::from(b));
+ assert!(*a == b * &q + &r);
+ assert!(q == *ans_q);
+ assert!(r == *ans_r);
+
+ let b = BigInt::from(b);
+ let (a, ans_q, ans_r) = (a.clone(), ans_q.clone(), ans_r.clone());
+ assert_signed_scalar_op!(a / b == ans_q);
+ assert_signed_scalar_op!(a % b == ans_r);
+ assert_signed_scalar_assign_op!(a /= b == ans_q);
+ assert_signed_scalar_assign_op!(a %= b == ans_r);
+
+ let nb = -b;
+ assert_signed_scalar_op!(a / nb == -ans_q.clone());
+ assert_signed_scalar_op!(a % nb == ans_r);
+ assert_signed_scalar_assign_op!(a /= nb == -ans_q.clone());
+ assert_signed_scalar_assign_op!(a %= nb == ans_r);
+ }
+
+ fn check(a: &BigInt, b: u32, q: &BigInt, r: &BigInt) {
+ check_sub(a, b, q, r);
+ check_sub(&a.neg(), b, &q.neg(), &r.neg());
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ if a_vec.len() == 1 && a_vec[0] != 0 {
+ let a = a_vec[0];
+ check(&c, a, &b, &Zero::zero());
+ }
+
+ if b_vec.len() == 1 && b_vec[0] != 0 {
+ let b = b_vec[0];
+ check(&c, b, &a, &Zero::zero());
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let d = BigInt::from_slice(Plus, d_vec);
+
+ if b_vec.len() == 1 && b_vec[0] != 0 {
+ let b = b_vec[0];
+ check(&a, b, &c, &d);
+ }
+ }
+}
diff --git a/tests/biguint.rs b/tests/biguint.rs
new file mode 100644
index 0000000..13b69f2
--- /dev/null
+++ b/tests/biguint.rs
@@ -0,0 +1,1836 @@
+use num_bigint::Sign::Plus;
+use num_bigint::{BigInt, ToBigInt};
+use num_bigint::{BigUint, ToBigUint};
+use num_integer::Integer;
+
+use std::cmp::Ordering::{Equal, Greater, Less};
+use std::collections::hash_map::RandomState;
+use std::hash::{BuildHasher, Hash, Hasher};
+use std::i64;
+use std::iter::repeat;
+use std::str::FromStr;
+use std::{f32, f64};
+use std::{i128, u128};
+use std::{u16, u32, u64, u8, usize};
+
+use num_traits::{
+ pow, CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, FromPrimitive, Num, One, Pow, ToPrimitive,
+ Zero,
+};
+
+mod consts;
+use crate::consts::*;
+
+#[macro_use]
+mod macros;
+
+#[test]
+fn test_from_bytes_be() {
+ fn check(s: &str, result: &str) {
+ assert_eq!(
+ BigUint::from_bytes_be(s.as_bytes()),
+ BigUint::parse_bytes(result.as_bytes(), 10).unwrap()
+ );
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("AB", "16706");
+ check("Hello world!", "22405534230753963835153736737");
+ assert_eq!(BigUint::from_bytes_be(&[]), BigUint::zero());
+}
+
+#[test]
+fn test_to_bytes_be() {
+ fn check(s: &str, result: &str) {
+ let b = BigUint::parse_bytes(result.as_bytes(), 10).unwrap();
+ assert_eq!(b.to_bytes_be(), s.as_bytes());
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("AB", "16706");
+ check("Hello world!", "22405534230753963835153736737");
+ let b: BigUint = Zero::zero();
+ assert_eq!(b.to_bytes_be(), [0]);
+
+ // Test with leading/trailing zero bytes and a full BigDigit of value 0
+ let b = BigUint::from_str_radix("00010000000000000200", 16).unwrap();
+ assert_eq!(b.to_bytes_be(), [1, 0, 0, 0, 0, 0, 0, 2, 0]);
+}
+
+#[test]
+fn test_from_bytes_le() {
+ fn check(s: &str, result: &str) {
+ assert_eq!(
+ BigUint::from_bytes_le(s.as_bytes()),
+ BigUint::parse_bytes(result.as_bytes(), 10).unwrap()
+ );
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("BA", "16706");
+ check("!dlrow olleH", "22405534230753963835153736737");
+ assert_eq!(BigUint::from_bytes_le(&[]), BigUint::zero());
+}
+
+#[test]
+fn test_to_bytes_le() {
+ fn check(s: &str, result: &str) {
+ let b = BigUint::parse_bytes(result.as_bytes(), 10).unwrap();
+ assert_eq!(b.to_bytes_le(), s.as_bytes());
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("BA", "16706");
+ check("!dlrow olleH", "22405534230753963835153736737");
+ let b: BigUint = Zero::zero();
+ assert_eq!(b.to_bytes_le(), [0]);
+
+ // Test with leading/trailing zero bytes and a full BigDigit of value 0
+ let b = BigUint::from_str_radix("00010000000000000200", 16).unwrap();
+ assert_eq!(b.to_bytes_le(), [0, 2, 0, 0, 0, 0, 0, 0, 1]);
+}
+
+#[test]
+fn test_cmp() {
+ let data: [&[_]; 7] = [&[], &[1], &[2], &[!0], &[0, 1], &[2, 1], &[1, 1, 1]];
+ let data: Vec<BigUint> = data.iter().map(|v| BigUint::from_slice(*v)).collect();
+ for (i, ni) in data.iter().enumerate() {
+ for (j0, nj) in data[i..].iter().enumerate() {
+ let j = j0 + i;
+ if i == j {
+ assert_eq!(ni.cmp(nj), Equal);
+ assert_eq!(nj.cmp(ni), Equal);
+ assert_eq!(ni, nj);
+ assert!(!(ni != nj));
+ assert!(ni <= nj);
+ assert!(ni >= nj);
+ assert!(!(ni < nj));
+ assert!(!(ni > nj));
+ } else {
+ assert_eq!(ni.cmp(nj), Less);
+ assert_eq!(nj.cmp(ni), Greater);
+
+ assert!(!(ni == nj));
+ assert!(ni != nj);
+
+ assert!(ni <= nj);
+ assert!(!(ni >= nj));
+ assert!(ni < nj);
+ assert!(!(ni > nj));
+
+ assert!(!(nj <= ni));
+ assert!(nj >= ni);
+ assert!(!(nj < ni));
+ assert!(nj > ni);
+ }
+ }
+ }
+}
+
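+// Helper: run a value through the std `RandomState` hasher and return the digest,
+// so the test below can compare hashes of structurally equal `BigUint`s.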
+fn hash<T: Hash>(x: &T) -> u64 {
+ let mut hasher = <RandomState as BuildHasher>::Hasher::new();
+ x.hash(&mut hasher);
+ hasher.finish()
+}
+
+#[test]
+fn test_hash() {
+ use crate::hash;
+
+ let a = BigUint::new(vec![]);
+ let b = BigUint::new(vec![0]);
+ let c = BigUint::new(vec![1]);
+ let d = BigUint::new(vec![1, 0, 0, 0, 0, 0]);
+ let e = BigUint::new(vec![0, 0, 0, 0, 0, 1]);
+ assert!(hash(&a) == hash(&b));
+ assert!(hash(&b) != hash(&c));
+ assert!(hash(&c) == hash(&d));
+ assert!(hash(&d) != hash(&e));
+}
+
+// Test vector columns: (a, b, a & b, a | b, a ^ b)
+const BIT_TESTS: &[(&[u32], &[u32], &[u32], &[u32], &[u32])] = &[
+ (&[], &[], &[], &[], &[]),
+ (&[1, 0, 1], &[1, 1], &[1], &[1, 1, 1], &[0, 1, 1]),
+ (&[1, 0, 1], &[0, 1, 1], &[0, 0, 1], &[1, 1, 1], &[1, 1]),
+ (
+ &[268, 482, 17],
+ &[964, 54],
+ &[260, 34],
+ &[972, 502, 17],
+ &[712, 468, 17],
+ ),
+];
+
+#[test]
+fn test_bitand() {
+ for elm in BIT_TESTS {
+ let (a_vec, b_vec, c_vec, _, _) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert_op!(a & b == c);
+ assert_op!(b & a == c);
+ assert_assign_op!(a &= b == c);
+ assert_assign_op!(b &= a == c);
+ }
+}
+
+#[test]
+fn test_bitor() {
+ for elm in BIT_TESTS {
+ let (a_vec, b_vec, _, c_vec, _) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert_op!(a | b == c);
+ assert_op!(b | a == c);
+ assert_assign_op!(a |= b == c);
+ assert_assign_op!(b |= a == c);
+ }
+}
+
+#[test]
+fn test_bitxor() {
+ for elm in BIT_TESTS {
+ let (a_vec, b_vec, _, _, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert_op!(a ^ b == c);
+ assert_op!(b ^ a == c);
+ assert_op!(a ^ c == b);
+ assert_op!(c ^ a == b);
+ assert_op!(b ^ c == a);
+ assert_op!(c ^ b == a);
+ assert_assign_op!(a ^= b == c);
+ assert_assign_op!(b ^= a == c);
+ assert_assign_op!(a ^= c == b);
+ assert_assign_op!(c ^= a == b);
+ assert_assign_op!(b ^= c == a);
+ assert_assign_op!(c ^= b == a);
+ }
+}
+
+#[test]
+fn test_shl() {
+ fn check(s: &str, shift: usize, ans: &str) {
+ let opt_biguint = BigUint::from_str_radix(s, 16).ok();
+ let mut bu_assign = opt_biguint.unwrap();
+ let bu = (bu_assign.clone() << shift).to_str_radix(16);
+ assert_eq!(bu, ans);
+ bu_assign <<= shift;
+ assert_eq!(bu_assign.to_str_radix(16), ans);
+ }
+
+ check("0", 3, "0");
+ check("1", 3, "8");
+
+ check(
+ "1\
+ 0000\
+ 0000\
+ 0000\
+ 0001\
+ 0000\
+ 0000\
+ 0000\
+ 0001",
+ 3,
+ "8\
+ 0000\
+ 0000\
+ 0000\
+ 0008\
+ 0000\
+ 0000\
+ 0000\
+ 0008",
+ );
+ check(
+ "1\
+ 0000\
+ 0001\
+ 0000\
+ 0001",
+ 2,
+ "4\
+ 0000\
+ 0004\
+ 0000\
+ 0004",
+ );
+ check(
+ "1\
+ 0001\
+ 0001",
+ 1,
+ "2\
+ 0002\
+ 0002",
+ );
+
+ check(
+ "\
+ 4000\
+ 0000\
+ 0000\
+ 0000",
+ 3,
+ "2\
+ 0000\
+ 0000\
+ 0000\
+ 0000",
+ );
+ check(
+ "4000\
+ 0000",
+ 2,
+ "1\
+ 0000\
+ 0000",
+ );
+ check(
+ "4000",
+ 2,
+ "1\
+ 0000",
+ );
+
+ check(
+ "4000\
+ 0000\
+ 0000\
+ 0000",
+ 67,
+ "2\
+ 0000\
+ 0000\
+ 0000\
+ 0000\
+ 0000\
+ 0000\
+ 0000\
+ 0000",
+ );
+ check(
+ "4000\
+ 0000",
+ 35,
+ "2\
+ 0000\
+ 0000\
+ 0000\
+ 0000",
+ );
+ check(
+ "4000",
+ 19,
+ "2\
+ 0000\
+ 0000",
+ );
+
+ check(
+ "fedc\
+ ba98\
+ 7654\
+ 3210\
+ fedc\
+ ba98\
+ 7654\
+ 3210",
+ 4,
+ "f\
+ edcb\
+ a987\
+ 6543\
+ 210f\
+ edcb\
+ a987\
+ 6543\
+ 2100",
+ );
+ check(
+ "88887777666655554444333322221111",
+ 16,
+ "888877776666555544443333222211110000",
+ );
+}
+
+#[test]
+fn test_shr() {
+ fn check(s: &str, shift: usize, ans: &str) {
+ let opt_biguint = BigUint::from_str_radix(s, 16).ok();
+ let mut bu_assign = opt_biguint.unwrap();
+ let bu = (bu_assign.clone() >> shift).to_str_radix(16);
+ assert_eq!(bu, ans);
+ bu_assign >>= shift;
+ assert_eq!(bu_assign.to_str_radix(16), ans);
+ }
+
+ check("0", 3, "0");
+ check("f", 3, "1");
+
+ check(
+ "1\
+ 0000\
+ 0000\
+ 0000\
+ 0001\
+ 0000\
+ 0000\
+ 0000\
+ 0001",
+ 3,
+ "2000\
+ 0000\
+ 0000\
+ 0000\
+ 2000\
+ 0000\
+ 0000\
+ 0000",
+ );
+ check(
+ "1\
+ 0000\
+ 0001\
+ 0000\
+ 0001",
+ 2,
+ "4000\
+ 0000\
+ 4000\
+ 0000",
+ );
+ check(
+ "1\
+ 0001\
+ 0001",
+ 1,
+ "8000\
+ 8000",
+ );
+
+ check(
+ "2\
+ 0000\
+ 0000\
+ 0000\
+ 0001\
+ 0000\
+ 0000\
+ 0000\
+ 0001",
+ 67,
+ "4000\
+ 0000\
+ 0000\
+ 0000",
+ );
+ check(
+ "2\
+ 0000\
+ 0001\
+ 0000\
+ 0001",
+ 35,
+ "4000\
+ 0000",
+ );
+ check(
+ "2\
+ 0001\
+ 0001",
+ 19,
+ "4000",
+ );
+
+ check(
+ "1\
+ 0000\
+ 0000\
+ 0000\
+ 0000",
+ 1,
+ "8000\
+ 0000\
+ 0000\
+ 0000",
+ );
+ check(
+ "1\
+ 0000\
+ 0000",
+ 1,
+ "8000\
+ 0000",
+ );
+ check(
+ "1\
+ 0000",
+ 1,
+ "8000",
+ );
+ check(
+ "f\
+ edcb\
+ a987\
+ 6543\
+ 210f\
+ edcb\
+ a987\
+ 6543\
+ 2100",
+ 4,
+ "fedc\
+ ba98\
+ 7654\
+ 3210\
+ fedc\
+ ba98\
+ 7654\
+ 3210",
+ );
+
+ check(
+ "888877776666555544443333222211110000",
+ 16,
+ "88887777666655554444333322221111",
+ );
+}
+
+// `DoubleBigDigit` size dependent
+#[test]
+fn test_convert_i64() {
+ fn check(b1: BigUint, i: i64) {
+ let b2: BigUint = FromPrimitive::from_i64(i).unwrap();
+ assert_eq!(b1, b2);
+ assert_eq!(b1.to_i64().unwrap(), i);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(i64::MAX.to_biguint().unwrap(), i64::MAX);
+
+ check(BigUint::new(vec![]), 0);
+ check(BigUint::new(vec![1]), 1);
+ check(BigUint::new(vec![N1]), (1 << 32) - 1);
+ check(BigUint::new(vec![0, 1]), 1 << 32);
+ check(BigUint::new(vec![N1, N1 >> 1]), i64::MAX);
+
+ assert_eq!(i64::MIN.to_biguint(), None);
+ assert_eq!(BigUint::new(vec![N1, N1]).to_i64(), None);
+ assert_eq!(BigUint::new(vec![0, 0, 1]).to_i64(), None);
+ assert_eq!(BigUint::new(vec![N1, N1, N1]).to_i64(), None);
+}
+
+#[test]
+fn test_convert_i128() {
+ fn check(b1: BigUint, i: i128) {
+ let b2: BigUint = FromPrimitive::from_i128(i).unwrap();
+ assert_eq!(b1, b2);
+ assert_eq!(b1.to_i128().unwrap(), i);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(i128::MAX.to_biguint().unwrap(), i128::MAX);
+
+ check(BigUint::new(vec![]), 0);
+ check(BigUint::new(vec![1]), 1);
+ check(BigUint::new(vec![N1]), (1 << 32) - 1);
+ check(BigUint::new(vec![0, 1]), 1 << 32);
+ check(BigUint::new(vec![N1, N1, N1, N1 >> 1]), i128::MAX);
+
+ assert_eq!(i128::MIN.to_biguint(), None);
+ assert_eq!(BigUint::new(vec![N1, N1, N1, N1]).to_i128(), None);
+ assert_eq!(BigUint::new(vec![0, 0, 0, 0, 1]).to_i128(), None);
+ assert_eq!(BigUint::new(vec![N1, N1, N1, N1, N1]).to_i128(), None);
+}
+
+// `DoubleBigDigit` size dependent
+#[test]
+fn test_convert_u64() {
+ fn check(b1: BigUint, u: u64) {
+ let b2: BigUint = FromPrimitive::from_u64(u).unwrap();
+ assert_eq!(b1, b2);
+ assert_eq!(b1.to_u64().unwrap(), u);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(u64::MIN.to_biguint().unwrap(), u64::MIN);
+ check(u64::MAX.to_biguint().unwrap(), u64::MAX);
+
+ check(BigUint::new(vec![]), 0);
+ check(BigUint::new(vec![1]), 1);
+ check(BigUint::new(vec![N1]), (1 << 32) - 1);
+ check(BigUint::new(vec![0, 1]), 1 << 32);
+ check(BigUint::new(vec![N1, N1]), u64::MAX);
+
+ assert_eq!(BigUint::new(vec![0, 0, 1]).to_u64(), None);
+ assert_eq!(BigUint::new(vec![N1, N1, N1]).to_u64(), None);
+}
+
+#[test]
+fn test_convert_u128() {
+ fn check(b1: BigUint, u: u128) {
+ let b2: BigUint = FromPrimitive::from_u128(u).unwrap();
+ assert_eq!(b1, b2);
+ assert_eq!(b1.to_u128().unwrap(), u);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(u128::MIN.to_biguint().unwrap(), u128::MIN);
+ check(u128::MAX.to_biguint().unwrap(), u128::MAX);
+
+ check(BigUint::new(vec![]), 0);
+ check(BigUint::new(vec![1]), 1);
+ check(BigUint::new(vec![N1]), (1 << 32) - 1);
+ check(BigUint::new(vec![0, 1]), 1 << 32);
+ check(BigUint::new(vec![N1, N1, N1, N1]), u128::MAX);
+
+ assert_eq!(BigUint::new(vec![0, 0, 0, 0, 1]).to_u128(), None);
+ assert_eq!(BigUint::new(vec![N1, N1, N1, N1, N1]).to_u128(), None);
+}
+
+#[test]
+#[allow(clippy::float_cmp)]
+fn test_convert_f32() {
+ fn check(b1: &BigUint, f: f32) {
+ let b2 = BigUint::from_f32(f).unwrap();
+ assert_eq!(b1, &b2);
+ assert_eq!(b1.to_f32().unwrap(), f);
+ }
+
+ check(&BigUint::zero(), 0.0);
+ check(&BigUint::one(), 1.0);
+ check(&BigUint::from(u16::MAX), pow(2.0_f32, 16) - 1.0);
+ check(&BigUint::from(1u64 << 32), pow(2.0_f32, 32));
+ check(&BigUint::from_slice(&[0, 0, 1]), pow(2.0_f32, 64));
+ check(
+ &((BigUint::one() << 100) + (BigUint::one() << 123)),
+ pow(2.0_f32, 100) + pow(2.0_f32, 123),
+ );
+ check(&(BigUint::one() << 127), pow(2.0_f32, 127));
+ check(&(BigUint::from((1u64 << 24) - 1) << (128 - 24)), f32::MAX);
+
+ // keep all 24 significant bits while placing them at different offsets within the BigDigits
+ let x: u32 = 0b00000000101111011111011011011101;
+ let mut f = x as f32;
+ let mut b = BigUint::from(x);
+ for _ in 0..64 {
+ check(&b, f);
+ f *= 2.0;
+ b <<= 1;
+ }
+
+ // this number when rounded to f64 then f32 isn't the same as when rounded straight to f32
+ let n: u64 = 0b0000000000111111111111111111111111011111111111111111111111111111;
+ assert!((n as f64) as f32 != n as f32);
+ assert_eq!(BigUint::from(n).to_f32(), Some(n as f32));
+
+ // test rounding up with the bits at different offsets to the BigDigits
+ let mut f = ((1u64 << 25) - 1) as f32;
+ let mut b = BigUint::from(1u64 << 25);
+ for _ in 0..64 {
+ assert_eq!(b.to_f32(), Some(f));
+ f *= 2.0;
+ b <<= 1;
+ }
+
+ // rounding
+ assert_eq!(BigUint::from_f32(-1.0), None);
+ assert_eq!(BigUint::from_f32(-0.99999), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f32(-0.5), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f32(-0.0), Some(BigUint::zero()));
+ assert_eq!(
+ BigUint::from_f32(f32::MIN_POSITIVE / 2.0),
+ Some(BigUint::zero())
+ );
+ assert_eq!(BigUint::from_f32(f32::MIN_POSITIVE), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f32(0.5), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f32(0.99999), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f32(f32::consts::E), Some(BigUint::from(2u32)));
+ assert_eq!(
+ BigUint::from_f32(f32::consts::PI),
+ Some(BigUint::from(3u32))
+ );
+
+ // special float values
+ assert_eq!(BigUint::from_f32(f32::NAN), None);
+ assert_eq!(BigUint::from_f32(f32::INFINITY), None);
+ assert_eq!(BigUint::from_f32(f32::NEG_INFINITY), None);
+ assert_eq!(BigUint::from_f32(f32::MIN), None);
+
+ // largest BigUint that will round to a finite f32 value
+ let big_num = (BigUint::one() << 128u8) - 1u8 - (BigUint::one() << (128u8 - 25));
+ assert_eq!(big_num.to_f32(), Some(f32::MAX));
+ assert_eq!((big_num + 1u8).to_f32(), Some(f32::INFINITY));
+
+ assert_eq!(
+ ((BigUint::one() << 128u8) - 1u8).to_f32(),
+ Some(f32::INFINITY)
+ );
+ assert_eq!((BigUint::one() << 128u8).to_f32(), Some(f32::INFINITY));
+}
+
+#[test]
+#[allow(clippy::float_cmp)]
+fn test_convert_f64() {
+ fn check(b1: &BigUint, f: f64) {
+ let b2 = BigUint::from_f64(f).unwrap();
+ assert_eq!(b1, &b2);
+ assert_eq!(b1.to_f64().unwrap(), f);
+ }
+
+ check(&BigUint::zero(), 0.0);
+ check(&BigUint::one(), 1.0);
+ check(&BigUint::from(u32::MAX), pow(2.0_f64, 32) - 1.0);
+ check(&BigUint::from(1u64 << 32), pow(2.0_f64, 32));
+ check(&BigUint::from_slice(&[0, 0, 1]), pow(2.0_f64, 64));
+ check(
+ &((BigUint::one() << 100) + (BigUint::one() << 152)),
+ pow(2.0_f64, 100) + pow(2.0_f64, 152),
+ );
+ check(&(BigUint::one() << 1023), pow(2.0_f64, 1023));
+ check(&(BigUint::from((1u64 << 53) - 1) << (1024 - 53)), f64::MAX);
+
+ // keep all 53 significant bits while placing them at different offsets within the BigDigits
+ let x: u64 = 0b0000000000011110111110110111111101110111101111011111011011011101;
+ let mut f = x as f64;
+ let mut b = BigUint::from(x);
+ for _ in 0..128 {
+ check(&b, f);
+ f *= 2.0;
+ b <<= 1;
+ }
+
+ // test rounding up with the bits at different offsets to the BigDigits
+ let mut f = ((1u64 << 54) - 1) as f64;
+ let mut b = BigUint::from(1u64 << 54);
+ for _ in 0..128 {
+ assert_eq!(b.to_f64(), Some(f));
+ f *= 2.0;
+ b <<= 1;
+ }
+
+ // rounding
+ assert_eq!(BigUint::from_f64(-1.0), None);
+ assert_eq!(BigUint::from_f64(-0.99999), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f64(-0.5), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f64(-0.0), Some(BigUint::zero()));
+ assert_eq!(
+ BigUint::from_f64(f64::MIN_POSITIVE / 2.0),
+ Some(BigUint::zero())
+ );
+ assert_eq!(BigUint::from_f64(f64::MIN_POSITIVE), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f64(0.5), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f64(0.99999), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f64(f64::consts::E), Some(BigUint::from(2u32)));
+ assert_eq!(
+ BigUint::from_f64(f64::consts::PI),
+ Some(BigUint::from(3u32))
+ );
+
+ // special float values
+ assert_eq!(BigUint::from_f64(f64::NAN), None);
+ assert_eq!(BigUint::from_f64(f64::INFINITY), None);
+ assert_eq!(BigUint::from_f64(f64::NEG_INFINITY), None);
+ assert_eq!(BigUint::from_f64(f64::MIN), None);
+
+ // largest BigUint that will round to a finite f64 value
+ let big_num = (BigUint::one() << 1024u16) - 1u8 - (BigUint::one() << (1024u16 - 54));
+ assert_eq!(big_num.to_f64(), Some(f64::MAX));
+ assert_eq!((big_num + 1u8).to_f64(), Some(f64::INFINITY));
+
+ assert_eq!(
+ ((BigUint::one() << 1024u16) - 1u8).to_f64(),
+ Some(f64::INFINITY)
+ );
+ assert_eq!((BigUint::one() << 1024u16).to_f64(), Some(f64::INFINITY));
+}
+
+#[test]
+fn test_convert_to_bigint() {
+ fn check(n: BigUint, ans: BigInt) {
+ assert_eq!(n.to_bigint().unwrap(), ans);
+ assert_eq!(n.to_bigint().unwrap().to_biguint().unwrap(), n);
+ }
+ check(Zero::zero(), Zero::zero());
+ check(
+ BigUint::new(vec![1, 2, 3]),
+ BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3])),
+ );
+}
+
+#[test]
+fn test_convert_from_uint() {
+ macro_rules! check {
+ ($ty:ident, $max:expr) => {
+ assert_eq!(BigUint::from($ty::zero()), BigUint::zero());
+ assert_eq!(BigUint::from($ty::one()), BigUint::one());
+ assert_eq!(BigUint::from($ty::MAX - $ty::one()), $max - BigUint::one());
+ assert_eq!(BigUint::from($ty::MAX), $max);
+ };
+ }
+
+ check!(u8, BigUint::from_slice(&[u8::MAX as u32]));
+ check!(u16, BigUint::from_slice(&[u16::MAX as u32]));
+ check!(u32, BigUint::from_slice(&[u32::MAX]));
+ check!(u64, BigUint::from_slice(&[u32::MAX, u32::MAX]));
+ check!(
+ u128,
+ BigUint::from_slice(&[u32::MAX, u32::MAX, u32::MAX, u32::MAX])
+ );
+ check!(usize, BigUint::from(usize::MAX as u64));
+}
+
+#[test]
+fn test_add() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert_op!(a + b == c);
+ assert_op!(b + a == c);
+ assert_assign_op!(a += b == c);
+ assert_assign_op!(b += a == c);
+ }
+}
+
+#[test]
+fn test_sub() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert_op!(c - a == b);
+ assert_op!(c - b == a);
+ assert_assign_op!(c -= a == b);
+ assert_assign_op!(c -= b == a);
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_sub_fail_on_underflow() {
+ let (a, b): (BigUint, BigUint) = (Zero::zero(), One::one());
+ let _ = a - b;
+}
+
+#[test]
+fn test_mul() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert_op!(a * b == c);
+ assert_op!(b * a == c);
+ assert_assign_op!(a *= b == c);
+ assert_assign_op!(b *= a == c);
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+ let d = BigUint::from_slice(d_vec);
+
+ assert!(a == &b * &c + &d);
+ assert!(a == &c * &b + &d);
+ }
+}
+
+#[test]
+fn test_div_rem() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ if !a.is_zero() {
+ assert_op!(c / a == b);
+ assert_op!(c % a == BigUint::zero());
+ assert_assign_op!(c /= a == b);
+ assert_assign_op!(c %= a == BigUint::zero());
+ assert_eq!(c.div_rem(&a), (b.clone(), BigUint::zero()));
+ }
+ if !b.is_zero() {
+ assert_op!(c / b == a);
+ assert_op!(c % b == BigUint::zero());
+ assert_assign_op!(c /= b == a);
+ assert_assign_op!(c %= b == BigUint::zero());
+ assert_eq!(c.div_rem(&b), (a.clone(), BigUint::zero()));
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+ let d = BigUint::from_slice(d_vec);
+
+ if !b.is_zero() {
+ assert_op!(a / b == c);
+ assert_op!(a % b == d);
+ assert_assign_op!(a /= b == c);
+ assert_assign_op!(a %= b == d);
+ assert!(a.div_rem(&b) == (c, d));
+ }
+ }
+}
+
+#[test]
+fn test_div_rem_big_multiple() {
+ let a = BigUint::from(3u32).pow(100u32);
+ let a2 = &a * &a;
+
+ let (div, rem) = a2.div_rem(&a);
+ assert_eq!(div, a);
+ assert!(rem.is_zero());
+
+ let (div, rem) = (&a2 - 1u32).div_rem(&a);
+ assert_eq!(div, &a - 1u32);
+ assert_eq!(rem, &a - 1u32);
+}
+
+#[test]
+fn test_div_ceil() {
+ fn check(a: &BigUint, b: &BigUint, d: &BigUint, m: &BigUint) {
+ if m.is_zero() {
+ assert_eq!(a.div_ceil(b), *d);
+ } else {
+ assert_eq!(a.div_ceil(b), d + 1u32);
+ }
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ if !a.is_zero() {
+ check(&c, &a, &b, &Zero::zero());
+ }
+ if !b.is_zero() {
+ check(&c, &b, &a, &Zero::zero());
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+ let d = BigUint::from_slice(d_vec);
+
+ if !b.is_zero() {
+ check(&a, &b, &c, &d);
+ }
+ }
+}
+
+#[test]
+fn test_checked_add() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert!(a.checked_add(&b).unwrap() == c);
+ assert!(b.checked_add(&a).unwrap() == c);
+ }
+}
+
+#[test]
+fn test_checked_sub() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert!(c.checked_sub(&a).unwrap() == b);
+ assert!(c.checked_sub(&b).unwrap() == a);
+
+ if a > c {
+ assert!(a.checked_sub(&c).is_none());
+ }
+ if b > c {
+ assert!(b.checked_sub(&c).is_none());
+ }
+ }
+}
+
+#[test]
+fn test_checked_mul() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert!(a.checked_mul(&b).unwrap() == c);
+ assert!(b.checked_mul(&a).unwrap() == c);
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+ let d = BigUint::from_slice(d_vec);
+
+ assert!(a == b.checked_mul(&c).unwrap() + &d);
+ assert!(a == c.checked_mul(&b).unwrap() + &d);
+ }
+}
+
+#[test]
+fn test_mul_overflow() {
+ // Test for issue #187 - overflow due to mac3 incorrectly sizing its temporary buffer
+ let s = "531137992816767098689588206552468627329593117727031923199444138200403559860852242739162502232636710047537552105951370000796528760829212940754539968588340162273730474622005920097370111";
+ let a: BigUint = s.parse().unwrap();
+ let b = a.clone();
+ let _ = a.checked_mul(&b);
+}
+
+#[test]
+fn test_checked_div() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ if !a.is_zero() {
+ assert!(c.checked_div(&a).unwrap() == b);
+ }
+ if !b.is_zero() {
+ assert!(c.checked_div(&b).unwrap() == a);
+ }
+
+ assert!(c.checked_div(&Zero::zero()).is_none());
+ }
+}
+
+#[test]
+fn test_gcd() {
+ fn check(a: usize, b: usize, c: usize) {
+ let big_a: BigUint = FromPrimitive::from_usize(a).unwrap();
+ let big_b: BigUint = FromPrimitive::from_usize(b).unwrap();
+ let big_c: BigUint = FromPrimitive::from_usize(c).unwrap();
+
+ assert_eq!(big_a.gcd(&big_b), big_c);
+ assert_eq!(big_a.gcd_lcm(&big_b).0, big_c);
+ }
+
+ check(10, 2, 2);
+ check(10, 3, 1);
+ check(0, 3, 3);
+ check(3, 3, 3);
+ check(56, 42, 14);
+}
+
+#[test]
+fn test_lcm() {
+ fn check(a: usize, b: usize, c: usize) {
+ let big_a: BigUint = FromPrimitive::from_usize(a).unwrap();
+ let big_b: BigUint = FromPrimitive::from_usize(b).unwrap();
+ let big_c: BigUint = FromPrimitive::from_usize(c).unwrap();
+
+ assert_eq!(big_a.lcm(&big_b), big_c);
+ assert_eq!(big_a.gcd_lcm(&big_b).1, big_c);
+ }
+
+ check(0, 0, 0);
+ check(1, 0, 0);
+ check(0, 1, 0);
+ check(1, 1, 1);
+ check(8, 9, 72);
+ check(11, 5, 55);
+ check(99, 17, 1683);
+}
+
+#[test]
+fn test_next_multiple_of() {
+ assert_eq!(
+ BigUint::from(16u32).next_multiple_of(&BigUint::from(8u32)),
+ BigUint::from(16u32)
+ );
+ assert_eq!(
+ BigUint::from(23u32).next_multiple_of(&BigUint::from(8u32)),
+ BigUint::from(24u32)
+ );
+}
+
+#[test]
+fn test_prev_multiple_of() {
+ assert_eq!(
+ BigUint::from(16u32).prev_multiple_of(&BigUint::from(8u32)),
+ BigUint::from(16u32)
+ );
+ assert_eq!(
+ BigUint::from(23u32).prev_multiple_of(&BigUint::from(8u32)),
+ BigUint::from(16u32)
+ );
+}
+
+#[test]
+fn test_is_even() {
+ let one: BigUint = FromStr::from_str("1").unwrap();
+ let two: BigUint = FromStr::from_str("2").unwrap();
+ let thousand: BigUint = FromStr::from_str("1000").unwrap();
+ let big: BigUint = FromStr::from_str("1000000000000000000000").unwrap();
+ let bigger: BigUint = FromStr::from_str("1000000000000000000001").unwrap();
+ assert!(one.is_odd());
+ assert!(two.is_even());
+ assert!(thousand.is_even());
+ assert!(big.is_even());
+ assert!(bigger.is_odd());
+ assert!((&one << 64u8).is_even());
+ assert!(((&one << 64u8) + one).is_odd());
+}
+
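+// Shared table of (number, [(radix, expected string)]) pairs for the radix
+// round-trip tests; the expected strings assume 32-bit `BigDigit`s (`bits = 32`).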
+fn to_str_pairs() -> Vec<(BigUint, Vec<(u32, String)>)> {
+ let bits = 32;
+ vec![
+ (
+ Zero::zero(),
+ vec![(2, "0".to_string()), (3, "0".to_string())],
+ ),
+ (
+ BigUint::from_slice(&[0xff]),
+ vec![
+ (2, "11111111".to_string()),
+ (3, "100110".to_string()),
+ (4, "3333".to_string()),
+ (5, "2010".to_string()),
+ (6, "1103".to_string()),
+ (7, "513".to_string()),
+ (8, "377".to_string()),
+ (9, "313".to_string()),
+ (10, "255".to_string()),
+ (11, "212".to_string()),
+ (12, "193".to_string()),
+ (13, "168".to_string()),
+ (14, "143".to_string()),
+ (15, "120".to_string()),
+ (16, "ff".to_string()),
+ ],
+ ),
+ (
+ BigUint::from_slice(&[0xfff]),
+ vec![
+ (2, "111111111111".to_string()),
+ (4, "333333".to_string()),
+ (16, "fff".to_string()),
+ ],
+ ),
+ (
+ BigUint::from_slice(&[1, 2]),
+ vec![
+ (
+ 2,
+ format!("10{}1", repeat("0").take(bits - 1).collect::<String>()),
+ ),
+ (
+ 4,
+ format!("2{}1", repeat("0").take(bits / 2 - 1).collect::<String>()),
+ ),
+ (
+ 10,
+ match bits {
+ 64 => "36893488147419103233".to_string(),
+ 32 => "8589934593".to_string(),
+ 16 => "131073".to_string(),
+ _ => panic!(),
+ },
+ ),
+ (
+ 16,
+ format!("2{}1", repeat("0").take(bits / 4 - 1).collect::<String>()),
+ ),
+ ],
+ ),
+ (
+ BigUint::from_slice(&[1, 2, 3]),
+ vec![
+ (
+ 2,
+ format!(
+ "11{}10{}1",
+ repeat("0").take(bits - 2).collect::<String>(),
+ repeat("0").take(bits - 1).collect::<String>()
+ ),
+ ),
+ (
+ 4,
+ format!(
+ "3{}2{}1",
+ repeat("0").take(bits / 2 - 1).collect::<String>(),
+ repeat("0").take(bits / 2 - 1).collect::<String>()
+ ),
+ ),
+ (
+ 8,
+ match bits {
+ 64 => "14000000000000000000004000000000000000000001".to_string(),
+ 32 => "6000000000100000000001".to_string(),
+ 16 => "140000400001".to_string(),
+ _ => panic!(),
+ },
+ ),
+ (
+ 10,
+ match bits {
+ 64 => "1020847100762815390427017310442723737601".to_string(),
+ 32 => "55340232229718589441".to_string(),
+ 16 => "12885032961".to_string(),
+ _ => panic!(),
+ },
+ ),
+ (
+ 16,
+ format!(
+ "3{}2{}1",
+ repeat("0").take(bits / 4 - 1).collect::<String>(),
+ repeat("0").take(bits / 4 - 1).collect::<String>()
+ ),
+ ),
+ ],
+ ),
+ ]
+}
+
+#[test]
+fn test_to_str_radix() {
+ let r = to_str_pairs();
+ for num_pair in r.iter() {
+ let &(ref n, ref rs) = num_pair;
+ for str_pair in rs.iter() {
+ let &(ref radix, ref str) = str_pair;
+ assert_eq!(n.to_str_radix(*radix), *str);
+ }
+ }
+}
+
+#[test]
+fn test_from_and_to_radix() {
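+ // Each entry is (hex input, radix, digits of the same value in that radix, least significant first).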
+ const GROUND_TRUTH: &[(&[u8], u32, &[u8])] = &[
+ (b"0", 42, &[0]),
+ (
+ b"ffffeeffbb",
+ 2,
+ &[
+ 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ ],
+ ),
+ (
+ b"ffffeeffbb",
+ 3,
+ &[
+ 2, 2, 1, 1, 2, 1, 1, 2, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 1, 0, 0, 2, 2, 0, 1,
+ ],
+ ),
+ (
+ b"ffffeeffbb",
+ 4,
+ &[3, 2, 3, 2, 3, 3, 3, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3],
+ ),
+ (
+ b"ffffeeffbb",
+ 5,
+ &[0, 4, 3, 3, 1, 4, 2, 4, 1, 4, 4, 2, 3, 0, 0, 1, 2, 1],
+ ),
+ (
+ b"ffffeeffbb",
+ 6,
+ &[5, 5, 4, 5, 5, 0, 0, 1, 2, 5, 3, 0, 1, 0, 2, 2],
+ ),
+ (
+ b"ffffeeffbb",
+ 7,
+ &[4, 2, 3, 6, 0, 1, 6, 1, 6, 2, 0, 3, 2, 4, 1],
+ ),
+ (
+ b"ffffeeffbb",
+ 8,
+ &[3, 7, 6, 7, 7, 5, 3, 7, 7, 7, 7, 7, 7, 1],
+ ),
+ (b"ffffeeffbb", 9, &[8, 4, 5, 7, 0, 0, 3, 2, 0, 3, 0, 8, 3]),
+ (b"ffffeeffbb", 10, &[5, 9, 5, 3, 1, 5, 0, 1, 5, 9, 9, 0, 1]),
+ (b"ffffeeffbb", 11, &[10, 7, 6, 5, 2, 0, 3, 3, 3, 4, 9, 3]),
+ (b"ffffeeffbb", 12, &[11, 8, 5, 10, 1, 10, 3, 1, 1, 9, 5, 1]),
+ (b"ffffeeffbb", 13, &[0, 5, 7, 4, 6, 5, 6, 11, 8, 12, 7]),
+ (b"ffffeeffbb", 14, &[11, 4, 4, 11, 8, 4, 6, 0, 3, 11, 3]),
+ (b"ffffeeffbb", 15, &[5, 11, 13, 2, 1, 10, 2, 0, 9, 13, 1]),
+ (b"ffffeeffbb", 16, &[11, 11, 15, 15, 14, 14, 15, 15, 15, 15]),
+ (b"ffffeeffbb", 17, &[0, 2, 14, 12, 2, 14, 8, 10, 4, 9]),
+ (b"ffffeeffbb", 18, &[17, 15, 5, 13, 10, 16, 16, 13, 9, 5]),
+ (b"ffffeeffbb", 19, &[14, 13, 2, 8, 9, 0, 1, 14, 7, 3]),
+ (b"ffffeeffbb", 20, &[15, 19, 3, 14, 0, 17, 19, 18, 2, 2]),
+ (b"ffffeeffbb", 21, &[11, 5, 4, 13, 5, 18, 9, 1, 8, 1]),
+ (b"ffffeeffbb", 22, &[21, 3, 7, 21, 15, 12, 17, 0, 20]),
+ (b"ffffeeffbb", 23, &[21, 21, 6, 9, 10, 7, 21, 0, 14]),
+ (b"ffffeeffbb", 24, &[11, 10, 19, 14, 22, 11, 17, 23, 9]),
+ (b"ffffeeffbb", 25, &[20, 18, 21, 22, 21, 14, 3, 5, 7]),
+ (b"ffffeeffbb", 26, &[13, 15, 24, 11, 17, 6, 23, 6, 5]),
+ (b"ffffeeffbb", 27, &[17, 16, 7, 0, 21, 0, 3, 24, 3]),
+ (b"ffffeeffbb", 28, &[11, 16, 11, 15, 14, 18, 13, 25, 2]),
+ (b"ffffeeffbb", 29, &[6, 8, 7, 19, 14, 13, 21, 5, 2]),
+ (b"ffffeeffbb", 30, &[5, 13, 18, 11, 10, 7, 8, 20, 1]),
+ (b"ffffeeffbb", 31, &[22, 26, 15, 19, 8, 27, 29, 8, 1]),
+ (b"ffffeeffbb", 32, &[27, 29, 31, 29, 30, 31, 31, 31]),
+ (b"ffffeeffbb", 33, &[32, 20, 27, 12, 1, 12, 26, 25]),
+ (b"ffffeeffbb", 34, &[17, 9, 16, 33, 13, 25, 31, 20]),
+ (b"ffffeeffbb", 35, &[25, 32, 2, 25, 11, 4, 3, 17]),
+ (b"ffffeeffbb", 36, &[35, 34, 5, 6, 32, 3, 1, 14]),
+ (b"ffffeeffbb", 37, &[16, 21, 18, 4, 33, 19, 21, 11]),
+ (b"ffffeeffbb", 38, &[33, 25, 19, 29, 20, 6, 23, 9]),
+ (b"ffffeeffbb", 39, &[26, 27, 29, 23, 16, 18, 0, 8]),
+ (b"ffffeeffbb", 40, &[35, 39, 30, 11, 16, 17, 28, 6]),
+ (b"ffffeeffbb", 41, &[36, 30, 9, 18, 12, 19, 26, 5]),
+ (b"ffffeeffbb", 42, &[11, 34, 37, 27, 1, 13, 32, 4]),
+ (b"ffffeeffbb", 43, &[3, 24, 11, 2, 10, 40, 1, 4]),
+ (b"ffffeeffbb", 44, &[43, 12, 40, 32, 3, 23, 19, 3]),
+ (b"ffffeeffbb", 45, &[35, 38, 44, 18, 22, 18, 42, 2]),
+ (b"ffffeeffbb", 46, &[21, 45, 18, 41, 17, 2, 24, 2]),
+ (b"ffffeeffbb", 47, &[37, 37, 11, 12, 6, 0, 8, 2]),
+ (b"ffffeeffbb", 48, &[11, 41, 40, 43, 5, 43, 41, 1]),
+ (b"ffffeeffbb", 49, &[18, 45, 7, 13, 20, 21, 30, 1]),
+ (b"ffffeeffbb", 50, &[45, 21, 5, 34, 21, 18, 20, 1]),
+ (b"ffffeeffbb", 51, &[17, 6, 26, 22, 38, 24, 11, 1]),
+ (b"ffffeeffbb", 52, &[39, 33, 38, 30, 46, 31, 3, 1]),
+ (b"ffffeeffbb", 53, &[31, 7, 44, 23, 9, 32, 49]),
+ (b"ffffeeffbb", 54, &[17, 35, 8, 37, 31, 18, 44]),
+ (b"ffffeeffbb", 55, &[10, 52, 9, 48, 36, 39, 39]),
+ (b"ffffeeffbb", 56, &[11, 50, 51, 22, 25, 36, 35]),
+ (b"ffffeeffbb", 57, &[14, 55, 12, 43, 20, 3, 32]),
+ (b"ffffeeffbb", 58, &[35, 18, 45, 56, 9, 51, 28]),
+ (b"ffffeeffbb", 59, &[51, 28, 20, 26, 55, 3, 26]),
+ (b"ffffeeffbb", 60, &[35, 6, 27, 46, 58, 33, 23]),
+ (b"ffffeeffbb", 61, &[58, 7, 6, 54, 49, 20, 21]),
+ (b"ffffeeffbb", 62, &[53, 59, 3, 14, 10, 22, 19]),
+ (b"ffffeeffbb", 63, &[53, 50, 23, 4, 56, 36, 17]),
+ (b"ffffeeffbb", 64, &[59, 62, 47, 59, 63, 63, 15]),
+ (b"ffffeeffbb", 65, &[0, 53, 39, 4, 40, 37, 14]),
+ (b"ffffeeffbb", 66, &[65, 59, 39, 1, 64, 19, 13]),
+ (b"ffffeeffbb", 67, &[35, 14, 19, 16, 25, 10, 12]),
+ (b"ffffeeffbb", 68, &[51, 38, 63, 50, 15, 8, 11]),
+ (b"ffffeeffbb", 69, &[44, 45, 18, 58, 68, 12, 10]),
+ (b"ffffeeffbb", 70, &[25, 51, 0, 60, 13, 24, 9]),
+ (b"ffffeeffbb", 71, &[54, 30, 9, 65, 28, 41, 8]),
+ (b"ffffeeffbb", 72, &[35, 35, 55, 54, 17, 64, 7]),
+ (b"ffffeeffbb", 73, &[34, 4, 48, 40, 27, 19, 7]),
+ (b"ffffeeffbb", 74, &[53, 47, 4, 56, 36, 51, 6]),
+ (b"ffffeeffbb", 75, &[20, 56, 10, 72, 24, 13, 6]),
+ (b"ffffeeffbb", 76, &[71, 31, 52, 60, 48, 53, 5]),
+ (b"ffffeeffbb", 77, &[32, 73, 14, 63, 15, 21, 5]),
+ (b"ffffeeffbb", 78, &[65, 13, 17, 32, 64, 68, 4]),
+ (b"ffffeeffbb", 79, &[37, 56, 2, 56, 25, 41, 4]),
+ (b"ffffeeffbb", 80, &[75, 59, 37, 41, 43, 15, 4]),
+ (b"ffffeeffbb", 81, &[44, 68, 0, 21, 27, 72, 3]),
+ (b"ffffeeffbb", 82, &[77, 35, 2, 74, 46, 50, 3]),
+ (b"ffffeeffbb", 83, &[52, 51, 19, 76, 10, 30, 3]),
+ (b"ffffeeffbb", 84, &[11, 80, 19, 19, 76, 10, 3]),
+ (b"ffffeeffbb", 85, &[0, 82, 20, 14, 68, 77, 2]),
+ (b"ffffeeffbb", 86, &[3, 12, 78, 37, 62, 61, 2]),
+ (b"ffffeeffbb", 87, &[35, 12, 20, 8, 52, 46, 2]),
+ (b"ffffeeffbb", 88, &[43, 6, 54, 42, 30, 32, 2]),
+ (b"ffffeeffbb", 89, &[49, 52, 85, 21, 80, 18, 2]),
+ (b"ffffeeffbb", 90, &[35, 64, 78, 24, 18, 6, 2]),
+ (b"ffffeeffbb", 91, &[39, 17, 83, 63, 17, 85, 1]),
+ (b"ffffeeffbb", 92, &[67, 22, 85, 79, 75, 74, 1]),
+ (b"ffffeeffbb", 93, &[53, 60, 39, 29, 4, 65, 1]),
+ (b"ffffeeffbb", 94, &[37, 89, 2, 72, 76, 55, 1]),
+ (b"ffffeeffbb", 95, &[90, 74, 89, 9, 9, 47, 1]),
+ (b"ffffeeffbb", 96, &[59, 20, 46, 35, 81, 38, 1]),
+ (b"ffffeeffbb", 97, &[94, 87, 60, 71, 3, 31, 1]),
+ (b"ffffeeffbb", 98, &[67, 22, 63, 50, 62, 23, 1]),
+ (b"ffffeeffbb", 99, &[98, 6, 69, 12, 61, 16, 1]),
+ (b"ffffeeffbb", 100, &[95, 35, 51, 10, 95, 9, 1]),
+ (b"ffffeeffbb", 101, &[87, 27, 7, 8, 62, 3, 1]),
+ (b"ffffeeffbb", 102, &[17, 3, 32, 79, 59, 99]),
+ (b"ffffeeffbb", 103, &[30, 22, 90, 0, 87, 94]),
+ (b"ffffeeffbb", 104, &[91, 68, 87, 68, 38, 90]),
+ (b"ffffeeffbb", 105, &[95, 80, 54, 73, 15, 86]),
+ (b"ffffeeffbb", 106, &[31, 30, 24, 16, 17, 82]),
+ (b"ffffeeffbb", 107, &[51, 50, 10, 12, 42, 78]),
+ (b"ffffeeffbb", 108, &[71, 71, 96, 78, 89, 74]),
+ (b"ffffeeffbb", 109, &[33, 18, 93, 22, 50, 71]),
+ (b"ffffeeffbb", 110, &[65, 53, 57, 88, 29, 68]),
+ (b"ffffeeffbb", 111, &[53, 93, 67, 90, 27, 65]),
+ (b"ffffeeffbb", 112, &[11, 109, 96, 65, 43, 62]),
+ (b"ffffeeffbb", 113, &[27, 23, 106, 56, 76, 59]),
+ (b"ffffeeffbb", 114, &[71, 84, 31, 112, 11, 57]),
+ (b"ffffeeffbb", 115, &[90, 22, 1, 56, 76, 54]),
+ (b"ffffeeffbb", 116, &[35, 38, 98, 57, 40, 52]),
+ (b"ffffeeffbb", 117, &[26, 113, 115, 62, 17, 50]),
+ (b"ffffeeffbb", 118, &[51, 14, 5, 18, 7, 48]),
+ (b"ffffeeffbb", 119, &[102, 31, 110, 108, 8, 46]),
+ (b"ffffeeffbb", 120, &[35, 93, 96, 50, 22, 44]),
+ (b"ffffeeffbb", 121, &[87, 61, 2, 36, 47, 42]),
+ (b"ffffeeffbb", 122, &[119, 64, 1, 22, 83, 40]),
+ (b"ffffeeffbb", 123, &[77, 119, 32, 90, 6, 39]),
+ (b"ffffeeffbb", 124, &[115, 122, 31, 79, 62, 37]),
+ (b"ffffeeffbb", 125, &[95, 108, 47, 74, 3, 36]),
+ (b"ffffeeffbb", 126, &[53, 25, 116, 39, 78, 34]),
+ (b"ffffeeffbb", 127, &[22, 23, 125, 67, 35, 33]),
+ (b"ffffeeffbb", 128, &[59, 127, 59, 127, 127, 31]),
+ (b"ffffeeffbb", 129, &[89, 36, 1, 59, 100, 30]),
+ (b"ffffeeffbb", 130, &[65, 91, 123, 89, 79, 29]),
+ (b"ffffeeffbb", 131, &[58, 72, 39, 63, 65, 28]),
+ (b"ffffeeffbb", 132, &[131, 62, 92, 82, 57, 27]),
+ (b"ffffeeffbb", 133, &[109, 31, 51, 123, 55, 26]),
+ (b"ffffeeffbb", 134, &[35, 74, 21, 27, 60, 25]),
+ (b"ffffeeffbb", 135, &[125, 132, 49, 37, 70, 24]),
+ (b"ffffeeffbb", 136, &[51, 121, 117, 133, 85, 23]),
+ (b"ffffeeffbb", 137, &[113, 60, 135, 22, 107, 22]),
+ (b"ffffeeffbb", 138, &[113, 91, 73, 93, 133, 21]),
+ (b"ffffeeffbb", 139, &[114, 75, 102, 51, 26, 21]),
+ (b"ffffeeffbb", 140, &[95, 25, 35, 16, 62, 20]),
+ (b"ffffeeffbb", 141, &[131, 137, 16, 110, 102, 19]),
+ (b"ffffeeffbb", 142, &[125, 121, 108, 34, 6, 19]),
+ (b"ffffeeffbb", 143, &[65, 78, 138, 55, 55, 18]),
+ (b"ffffeeffbb", 144, &[107, 125, 121, 15, 109, 17]),
+ (b"ffffeeffbb", 145, &[35, 13, 122, 42, 22, 17]),
+ (b"ffffeeffbb", 146, &[107, 38, 103, 123, 83, 16]),
+ (b"ffffeeffbb", 147, &[116, 96, 71, 98, 2, 16]),
+ (b"ffffeeffbb", 148, &[127, 23, 75, 99, 71, 15]),
+ (b"ffffeeffbb", 149, &[136, 110, 53, 114, 144, 14]),
+ (b"ffffeeffbb", 150, &[95, 140, 133, 130, 71, 14]),
+ (b"ffffeeffbb", 151, &[15, 50, 29, 137, 0, 14]),
+ (b"ffffeeffbb", 152, &[147, 15, 89, 121, 83, 13]),
+ (b"ffffeeffbb", 153, &[17, 87, 93, 72, 17, 13]),
+ (b"ffffeeffbb", 154, &[109, 113, 3, 133, 106, 12]),
+ (b"ffffeeffbb", 155, &[115, 141, 120, 139, 44, 12]),
+ (b"ffffeeffbb", 156, &[143, 45, 4, 82, 140, 11]),
+ (b"ffffeeffbb", 157, &[149, 92, 15, 106, 82, 11]),
+ (b"ffffeeffbb", 158, &[37, 107, 79, 46, 26, 11]),
+ (b"ffffeeffbb", 159, &[137, 37, 146, 51, 130, 10]),
+ (b"ffffeeffbb", 160, &[155, 69, 29, 115, 77, 10]),
+ (b"ffffeeffbb", 161, &[67, 98, 46, 68, 26, 10]),
+ (b"ffffeeffbb", 162, &[125, 155, 60, 63, 138, 9]),
+ (b"ffffeeffbb", 163, &[96, 43, 118, 93, 90, 9]),
+ (b"ffffeeffbb", 164, &[159, 99, 123, 152, 43, 9]),
+ (b"ffffeeffbb", 165, &[65, 17, 1, 69, 163, 8]),
+ (b"ffffeeffbb", 166, &[135, 108, 25, 165, 119, 8]),
+ (b"ffffeeffbb", 167, &[165, 116, 164, 103, 77, 8]),
+ (b"ffffeeffbb", 168, &[11, 166, 67, 44, 36, 8]),
+ (b"ffffeeffbb", 169, &[65, 59, 71, 149, 164, 7]),
+ (b"ffffeeffbb", 170, &[85, 83, 26, 76, 126, 7]),
+ (b"ffffeeffbb", 171, &[71, 132, 140, 157, 88, 7]),
+ (b"ffffeeffbb", 172, &[3, 6, 127, 47, 52, 7]),
+ (b"ffffeeffbb", 173, &[122, 66, 53, 83, 16, 7]),
+ (b"ffffeeffbb", 174, &[35, 6, 5, 88, 155, 6]),
+ (b"ffffeeffbb", 175, &[95, 20, 84, 56, 122, 6]),
+ (b"ffffeeffbb", 176, &[43, 91, 57, 159, 89, 6]),
+ (b"ffffeeffbb", 177, &[110, 127, 54, 40, 58, 6]),
+ (b"ffffeeffbb", 178, &[49, 115, 43, 47, 27, 6]),
+ (b"ffffeeffbb", 179, &[130, 91, 4, 178, 175, 5]),
+ (b"ffffeeffbb", 180, &[35, 122, 109, 70, 147, 5]),
+ (b"ffffeeffbb", 181, &[94, 94, 4, 79, 119, 5]),
+ (b"ffffeeffbb", 182, &[39, 54, 66, 19, 92, 5]),
+ (b"ffffeeffbb", 183, &[119, 2, 143, 69, 65, 5]),
+ (b"ffffeeffbb", 184, &[67, 57, 90, 44, 39, 5]),
+ (b"ffffeeffbb", 185, &[90, 63, 141, 123, 13, 5]),
+ (b"ffffeeffbb", 186, &[53, 123, 172, 119, 174, 4]),
+ (b"ffffeeffbb", 187, &[153, 21, 68, 28, 151, 4]),
+ (b"ffffeeffbb", 188, &[131, 138, 94, 32, 128, 4]),
+ (b"ffffeeffbb", 189, &[179, 121, 156, 130, 105, 4]),
+ (b"ffffeeffbb", 190, &[185, 179, 164, 131, 83, 4]),
+ (b"ffffeeffbb", 191, &[118, 123, 37, 31, 62, 4]),
+ (b"ffffeeffbb", 192, &[59, 106, 83, 16, 41, 4]),
+ (b"ffffeeffbb", 193, &[57, 37, 47, 86, 20, 4]),
+ (b"ffffeeffbb", 194, &[191, 140, 63, 45, 0, 4]),
+ (b"ffffeeffbb", 195, &[65, 169, 83, 84, 175, 3]),
+ (b"ffffeeffbb", 196, &[67, 158, 64, 6, 157, 3]),
+ (b"ffffeeffbb", 197, &[121, 26, 167, 3, 139, 3]),
+ (b"ffffeeffbb", 198, &[197, 151, 165, 75, 121, 3]),
+ (b"ffffeeffbb", 199, &[55, 175, 36, 22, 104, 3]),
+ (b"ffffeeffbb", 200, &[195, 167, 162, 38, 87, 3]),
+ (b"ffffeeffbb", 201, &[35, 27, 136, 124, 70, 3]),
+ (b"ffffeeffbb", 202, &[87, 64, 153, 76, 54, 3]),
+ (b"ffffeeffbb", 203, &[151, 191, 14, 94, 38, 3]),
+ (b"ffffeeffbb", 204, &[119, 103, 135, 175, 22, 3]),
+ (b"ffffeeffbb", 205, &[200, 79, 123, 115, 7, 3]),
+ (b"ffffeeffbb", 206, &[133, 165, 202, 115, 198, 2]),
+ (b"ffffeeffbb", 207, &[44, 153, 193, 175, 184, 2]),
+ (b"ffffeeffbb", 208, &[91, 190, 125, 86, 171, 2]),
+ (b"ffffeeffbb", 209, &[109, 151, 34, 53, 158, 2]),
+ (b"ffffeeffbb", 210, &[95, 40, 171, 74, 145, 2]),
+ (b"ffffeeffbb", 211, &[84, 195, 162, 150, 132, 2]),
+ (b"ffffeeffbb", 212, &[31, 15, 59, 68, 120, 2]),
+ (b"ffffeeffbb", 213, &[125, 57, 127, 36, 108, 2]),
+ (b"ffffeeffbb", 214, &[51, 132, 2, 55, 96, 2]),
+ (b"ffffeeffbb", 215, &[175, 133, 177, 122, 84, 2]),
+ (b"ffffeeffbb", 216, &[179, 35, 78, 23, 73, 2]),
+ (b"ffffeeffbb", 217, &[53, 101, 208, 186, 61, 2]),
+ (b"ffffeeffbb", 218, &[33, 9, 214, 179, 50, 2]),
+ (b"ffffeeffbb", 219, &[107, 147, 175, 217, 39, 2]),
+ (b"ffffeeffbb", 220, &[175, 81, 179, 79, 29, 2]),
+ (b"ffffeeffbb", 221, &[0, 76, 95, 204, 18, 2]),
+ (b"ffffeeffbb", 222, &[53, 213, 16, 150, 8, 2]),
+ (b"ffffeeffbb", 223, &[158, 161, 42, 136, 221, 1]),
+ (b"ffffeeffbb", 224, &[123, 54, 52, 162, 212, 1]),
+ (b"ffffeeffbb", 225, &[170, 43, 151, 2, 204, 1]),
+ (b"ffffeeffbb", 226, &[27, 68, 224, 105, 195, 1]),
+ (b"ffffeeffbb", 227, &[45, 69, 157, 20, 187, 1]),
+ (b"ffffeeffbb", 228, &[71, 213, 64, 199, 178, 1]),
+ (b"ffffeeffbb", 229, &[129, 203, 66, 186, 170, 1]),
+ (b"ffffeeffbb", 230, &[205, 183, 57, 208, 162, 1]),
+ (b"ffffeeffbb", 231, &[32, 50, 164, 33, 155, 1]),
+ (b"ffffeeffbb", 232, &[35, 135, 53, 123, 147, 1]),
+ (b"ffffeeffbb", 233, &[209, 47, 89, 13, 140, 1]),
+ (b"ffffeeffbb", 234, &[143, 56, 175, 168, 132, 1]),
+ (b"ffffeeffbb", 235, &[225, 157, 216, 121, 125, 1]),
+ (b"ffffeeffbb", 236, &[51, 66, 119, 105, 118, 1]),
+ (b"ffffeeffbb", 237, &[116, 150, 26, 119, 111, 1]),
+ (b"ffffeeffbb", 238, &[221, 15, 87, 162, 104, 1]),
+ (b"ffffeeffbb", 239, &[234, 155, 214, 234, 97, 1]),
+ (b"ffffeeffbb", 240, &[155, 46, 84, 96, 91, 1]),
+ (b"ffffeeffbb", 241, &[187, 48, 90, 225, 84, 1]),
+ (b"ffffeeffbb", 242, &[87, 212, 151, 140, 78, 1]),
+ (b"ffffeeffbb", 243, &[206, 22, 189, 81, 72, 1]),
+ (b"ffffeeffbb", 244, &[119, 93, 122, 48, 66, 1]),
+ (b"ffffeeffbb", 245, &[165, 224, 117, 40, 60, 1]),
+ (b"ffffeeffbb", 246, &[77, 121, 100, 57, 54, 1]),
+ (b"ffffeeffbb", 247, &[52, 128, 242, 98, 48, 1]),
+ (b"ffffeeffbb", 248, &[115, 247, 224, 164, 42, 1]),
+ (b"ffffeeffbb", 249, &[218, 127, 223, 5, 37, 1]),
+ (b"ffffeeffbb", 250, &[95, 54, 168, 118, 31, 1]),
+ (b"ffffeeffbb", 251, &[121, 204, 240, 3, 26, 1]),
+ (b"ffffeeffbb", 252, &[179, 138, 123, 162, 20, 1]),
+ (b"ffffeeffbb", 253, &[21, 50, 1, 91, 15, 1]),
+ (b"ffffeeffbb", 254, &[149, 11, 63, 40, 10, 1]),
+ (b"ffffeeffbb", 255, &[170, 225, 247, 9, 5, 1]),
+ (b"ffffeeffbb", 256, &[187, 255, 238, 255, 255]),
+ ];
+
+ for &(bigint, radix, inbaseradix_le) in GROUND_TRUTH.iter() {
+ let bigint = BigUint::parse_bytes(bigint, 16).unwrap();
+ // to_radix_le
+ assert_eq!(bigint.to_radix_le(radix), inbaseradix_le);
+ // to_radix_be
+ let mut inbase_be = bigint.to_radix_be(radix);
+ inbase_be.reverse(); // now le
+ assert_eq!(inbase_be, inbaseradix_le);
+ // from_radix_le
+ assert_eq!(
+ BigUint::from_radix_le(inbaseradix_le, radix).unwrap(),
+ bigint
+ );
+ // from_radix_be
+ let mut inbaseradix_be = Vec::from(inbaseradix_le);
+ inbaseradix_be.reverse();
+ assert_eq!(
+ BigUint::from_radix_be(&inbaseradix_be, radix).unwrap(),
+ bigint
+ );
+ }
+
+ assert!(BigUint::from_radix_le(&[10, 100, 10], 50).is_none());
+ assert_eq!(BigUint::from_radix_le(&[], 2), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_radix_be(&[], 2), Some(BigUint::zero()));
+}
+
+#[test]
+fn test_from_str_radix() {
+ let r = to_str_pairs();
+ for num_pair in r.iter() {
+ let &(ref n, ref rs) = num_pair;
+ for str_pair in rs.iter() {
+ let &(ref radix, ref str) = str_pair;
+ assert_eq!(n, &BigUint::from_str_radix(str, *radix).unwrap());
+ }
+ }
+
+ let zed = BigUint::from_str_radix("Z", 10).ok();
+ assert_eq!(zed, None);
+ let blank = BigUint::from_str_radix("_", 2).ok();
+ assert_eq!(blank, None);
+ let blank_one = BigUint::from_str_radix("_1", 2).ok();
+ assert_eq!(blank_one, None);
+ let plus_one = BigUint::from_str_radix("+1", 10).ok();
+ assert_eq!(plus_one, Some(BigUint::from_slice(&[1])));
+ let plus_plus_one = BigUint::from_str_radix("++1", 10).ok();
+ assert_eq!(plus_plus_one, None);
+ let minus_one = BigUint::from_str_radix("-1", 10).ok();
+ assert_eq!(minus_one, None);
+ let zero_plus_two = BigUint::from_str_radix("0+2", 10).ok();
+ assert_eq!(zero_plus_two, None);
+ let three = BigUint::from_str_radix("1_1", 2).ok();
+ assert_eq!(three, Some(BigUint::from_slice(&[3])));
+ let ff = BigUint::from_str_radix("1111_1111", 2).ok();
+ assert_eq!(ff, Some(BigUint::from_slice(&[0xff])));
+}
+
+#[test]
+fn test_all_str_radix() {
+ let n = BigUint::new((0..10).collect());
+ for radix in 2..37 {
+ let s = n.to_str_radix(radix);
+ let x = BigUint::from_str_radix(&s, radix);
+ assert_eq!(x.unwrap(), n);
+
+ let s = s.to_ascii_uppercase();
+ let x = BigUint::from_str_radix(&s, radix);
+ assert_eq!(x.unwrap(), n);
+ }
+}
+
+#[test]
+fn test_lower_hex() {
+ let a = BigUint::parse_bytes(b"A", 16).unwrap();
+ let hello = BigUint::parse_bytes(b"22405534230753963835153736737", 10).unwrap();
+
+ assert_eq!(format!("{:x}", a), "a");
+ assert_eq!(format!("{:x}", hello), "48656c6c6f20776f726c6421");
+ assert_eq!(format!("{:♥>+#8x}", a), "♥♥♥♥+0xa");
+}
+
+#[test]
+fn test_upper_hex() {
+ let a = BigUint::parse_bytes(b"A", 16).unwrap();
+ let hello = BigUint::parse_bytes(b"22405534230753963835153736737", 10).unwrap();
+
+ assert_eq!(format!("{:X}", a), "A");
+ assert_eq!(format!("{:X}", hello), "48656C6C6F20776F726C6421");
+ assert_eq!(format!("{:♥>+#8X}", a), "♥♥♥♥+0xA");
+}
+
+#[test]
+fn test_binary() {
+ let a = BigUint::parse_bytes(b"A", 16).unwrap();
+ let hello = BigUint::parse_bytes(b"224055342307539", 10).unwrap();
+
+ assert_eq!(format!("{:b}", a), "1010");
+ assert_eq!(
+ format!("{:b}", hello),
+ "110010111100011011110011000101101001100011010011"
+ );
+ assert_eq!(format!("{:♥>+#8b}", a), "♥+0b1010");
+}
+
+#[test]
+fn test_octal() {
+ let a = BigUint::parse_bytes(b"A", 16).unwrap();
+ let hello = BigUint::parse_bytes(b"22405534230753963835153736737", 10).unwrap();
+
+ assert_eq!(format!("{:o}", a), "12");
+ assert_eq!(format!("{:o}", hello), "22062554330674403566756233062041");
+ assert_eq!(format!("{:♥>+#8o}", a), "♥♥♥+0o12");
+}
+
+#[test]
+fn test_display() {
+ let a = BigUint::parse_bytes(b"A", 16).unwrap();
+ let hello = BigUint::parse_bytes(b"22405534230753963835153736737", 10).unwrap();
+
+ assert_eq!(format!("{}", a), "10");
+ assert_eq!(format!("{}", hello), "22405534230753963835153736737");
+ assert_eq!(format!("{:♥>+#8}", a), "♥♥♥♥♥+10");
+}
+
+#[test]
+fn test_factor() {
+ fn factor(n: usize) -> BigUint {
+ let mut f: BigUint = One::one();
+ for i in 2..=n {
+ // FIXME(#5992): assignment operator overloads
+ // f *= FromPrimitive::from_usize(i);
+ let bu: BigUint = FromPrimitive::from_usize(i).unwrap();
+ f *= bu;
+ }
+ f
+ }
+
+ fn check(n: usize, s: &str) {
+ let n = factor(n);
+ let ans = BigUint::from_str_radix(s, 10).unwrap();
+ assert_eq!(n, ans);
+ }
+
+ check(3, "6");
+ check(10, "3628800");
+ check(20, "2432902008176640000");
+ check(30, "265252859812191058636308480000000");
+}
+
+#[test]
+fn test_bits() {
+ assert_eq!(BigUint::new(vec![0, 0, 0, 0]).bits(), 0);
+ let n: BigUint = FromPrimitive::from_usize(0).unwrap();
+ assert_eq!(n.bits(), 0);
+ let n: BigUint = FromPrimitive::from_usize(1).unwrap();
+ assert_eq!(n.bits(), 1);
+ let n: BigUint = FromPrimitive::from_usize(3).unwrap();
+ assert_eq!(n.bits(), 2);
+ let n: BigUint = BigUint::from_str_radix("4000000000", 16).unwrap();
+ assert_eq!(n.bits(), 39);
+ let one: BigUint = One::one();
+ assert_eq!((one << 426u16).bits(), 427);
+}
+
+#[test]
+fn test_iter_sum() {
+ let result: BigUint = FromPrimitive::from_isize(1234567).unwrap();
+ let data: Vec<BigUint> = vec![
+ FromPrimitive::from_u32(1000000).unwrap(),
+ FromPrimitive::from_u32(200000).unwrap(),
+ FromPrimitive::from_u32(30000).unwrap(),
+ FromPrimitive::from_u32(4000).unwrap(),
+ FromPrimitive::from_u32(500).unwrap(),
+ FromPrimitive::from_u32(60).unwrap(),
+ FromPrimitive::from_u32(7).unwrap(),
+ ];
+
+ assert_eq!(result, data.iter().sum::<BigUint>());
+ assert_eq!(result, data.into_iter().sum::<BigUint>());
+}
+
+#[test]
+fn test_iter_product() {
+ let data: Vec<BigUint> = vec![
+ FromPrimitive::from_u32(1001).unwrap(),
+ FromPrimitive::from_u32(1002).unwrap(),
+ FromPrimitive::from_u32(1003).unwrap(),
+ FromPrimitive::from_u32(1004).unwrap(),
+ FromPrimitive::from_u32(1005).unwrap(),
+ ];
+ let result = data.get(0).unwrap()
+ * data.get(1).unwrap()
+ * data.get(2).unwrap()
+ * data.get(3).unwrap()
+ * data.get(4).unwrap();
+
+ assert_eq!(result, data.iter().product::<BigUint>());
+ assert_eq!(result, data.into_iter().product::<BigUint>());
+}
+
+#[test]
+fn test_iter_sum_generic() {
+ let result: BigUint = FromPrimitive::from_isize(1234567).unwrap();
+ let data = vec![1000000_u32, 200000, 30000, 4000, 500, 60, 7];
+
+ assert_eq!(result, data.iter().sum::<BigUint>());
+ assert_eq!(result, data.into_iter().sum::<BigUint>());
+}
+
+#[test]
+fn test_iter_product_generic() {
+ let data = vec![1001_u32, 1002, 1003, 1004, 1005];
+ let result = data[0].to_biguint().unwrap()
+ * data[1].to_biguint().unwrap()
+ * data[2].to_biguint().unwrap()
+ * data[3].to_biguint().unwrap()
+ * data[4].to_biguint().unwrap();
+
+ assert_eq!(result, data.iter().product::<BigUint>());
+ assert_eq!(result, data.into_iter().product::<BigUint>());
+}
+
+#[test]
+fn test_pow() {
+ let one = BigUint::from(1u32);
+ let two = BigUint::from(2u32);
+ let four = BigUint::from(4u32);
+ let eight = BigUint::from(8u32);
+ let tentwentyfour = BigUint::from(1024u32);
+ let twentyfourtyeight = BigUint::from(2048u32);
+ macro_rules! check {
+ ($t:ty) => {
+ assert_eq!(Pow::pow(&two, 0 as $t), one);
+ assert_eq!(Pow::pow(&two, 1 as $t), two);
+ assert_eq!(Pow::pow(&two, 2 as $t), four);
+ assert_eq!(Pow::pow(&two, 3 as $t), eight);
+ assert_eq!(Pow::pow(&two, 10 as $t), tentwentyfour);
+ assert_eq!(Pow::pow(&two, 11 as $t), twentyfourtyeight);
+ assert_eq!(Pow::pow(&two, &(11 as $t)), twentyfourtyeight);
+ };
+ }
+ check!(u8);
+ check!(u16);
+ check!(u32);
+ check!(u64);
+ check!(u128);
+ check!(usize);
+}
+
+#[test]
+fn test_trailing_zeros() {
+ assert!(BigUint::from(0u8).trailing_zeros().is_none());
+ assert_eq!(BigUint::from(1u8).trailing_zeros().unwrap(), 0);
+ assert_eq!(BigUint::from(2u8).trailing_zeros().unwrap(), 1);
+ let x: BigUint = BigUint::one() << 128;
+ assert_eq!(x.trailing_zeros().unwrap(), 128);
+}
+
+#[test]
+fn test_trailing_ones() {
+ assert_eq!(BigUint::from(0u8).trailing_ones(), 0);
+ assert_eq!(BigUint::from(1u8).trailing_ones(), 1);
+ assert_eq!(BigUint::from(2u8).trailing_ones(), 0);
+ assert_eq!(BigUint::from(3u8).trailing_ones(), 2);
+ let x: BigUint = (BigUint::from(3u8) << 128) | BigUint::from(3u8);
+ assert_eq!(x.trailing_ones(), 2);
+ let x: BigUint = (BigUint::one() << 128) - BigUint::one();
+ assert_eq!(x.trailing_ones(), 128);
+}
+
+#[test]
+fn test_count_ones() {
+ assert_eq!(BigUint::from(0u8).count_ones(), 0);
+ assert_eq!(BigUint::from(1u8).count_ones(), 1);
+ assert_eq!(BigUint::from(2u8).count_ones(), 1);
+ assert_eq!(BigUint::from(3u8).count_ones(), 2);
+ let x: BigUint = (BigUint::from(3u8) << 128) | BigUint::from(3u8);
+ assert_eq!(x.count_ones(), 4);
+}
+
+#[test]
+fn test_bit() {
+ assert!(!BigUint::from(0u8).bit(0));
+ assert!(!BigUint::from(0u8).bit(100));
+ assert!(!BigUint::from(42u8).bit(4));
+ assert!(BigUint::from(42u8).bit(5));
+ let x: BigUint = (BigUint::from(3u8) << 128) | BigUint::from(3u8);
+ assert!(x.bit(129));
+ assert!(!x.bit(130));
+}
+
+#[test]
+fn test_set_bit() {
+ let mut x = BigUint::from(3u8);
+ x.set_bit(128, true);
+ x.set_bit(129, true);
+ assert_eq!(x, (BigUint::from(3u8) << 128) | BigUint::from(3u8));
+ x.set_bit(0, false);
+ x.set_bit(128, false);
+ x.set_bit(130, false);
+ assert_eq!(x, (BigUint::from(2u8) << 128) | BigUint::from(2u8));
+ x.set_bit(129, false);
+ x.set_bit(1, false);
+ assert_eq!(x, BigUint::zero());
+}
diff --git a/tests/biguint_scalar.rs b/tests/biguint_scalar.rs
new file mode 100644
index 0000000..b6eadd9
--- /dev/null
+++ b/tests/biguint_scalar.rs
@@ -0,0 +1,113 @@
+use num_bigint::BigUint;
+use num_traits::{ToPrimitive, Zero};
+
+mod consts;
+use crate::consts::*;
+
+#[macro_use]
+mod macros;
+
+#[test]
+fn test_scalar_add() {
+ fn check(x: &BigUint, y: &BigUint, z: &BigUint) {
+ let (x, y, z) = (x.clone(), y.clone(), z.clone());
+ assert_unsigned_scalar_op!(x + y == z);
+ assert_unsigned_scalar_assign_op!(x += y == z);
+ }
+
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ check(&a, &b, &c);
+ check(&b, &a, &c);
+ }
+}
+
+#[test]
+fn test_scalar_sub() {
+ fn check(x: &BigUint, y: &BigUint, z: &BigUint) {
+ let (x, y, z) = (x.clone(), y.clone(), z.clone());
+ assert_unsigned_scalar_op!(x - y == z);
+ assert_unsigned_scalar_assign_op!(x -= y == z);
+ }
+
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ check(&c, &a, &b);
+ check(&c, &b, &a);
+ }
+}
+
+#[test]
+fn test_scalar_mul() {
+ fn check(x: &BigUint, y: &BigUint, z: &BigUint) {
+ let (x, y, z) = (x.clone(), y.clone(), z.clone());
+ assert_unsigned_scalar_op!(x * y == z);
+ assert_unsigned_scalar_assign_op!(x *= y == z);
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ check(&a, &b, &c);
+ check(&b, &a, &c);
+ }
+}
+
+#[test]
+fn test_scalar_rem_noncommutative() {
+ assert_eq!(5u8 % BigUint::from(7u8), BigUint::from(5u8));
+ assert_eq!(BigUint::from(5u8) % 7u8, BigUint::from(5u8));
+}
+
+#[test]
+fn test_scalar_div_rem() {
+ fn check(x: &BigUint, y: &BigUint, z: &BigUint, r: &BigUint) {
+ let (x, y, z, r) = (x.clone(), y.clone(), z.clone(), r.clone());
+ assert_unsigned_scalar_op!(x / y == z);
+ assert_unsigned_scalar_op!(x % y == r);
+ assert_unsigned_scalar_assign_op!(x /= y == z);
+ assert_unsigned_scalar_assign_op!(x %= y == r);
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ if !a.is_zero() {
+ check(&c, &a, &b, &Zero::zero());
+ }
+
+ if !b.is_zero() {
+ check(&c, &b, &a, &Zero::zero());
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+ let d = BigUint::from_slice(d_vec);
+
+ if !b.is_zero() {
+ check(&a, &b, &c, &d);
+ assert_unsigned_scalar_op!(a / b == c);
+ assert_unsigned_scalar_op!(a % b == d);
+ assert_unsigned_scalar_assign_op!(a /= b == c);
+ assert_unsigned_scalar_assign_op!(a %= b == d);
+ }
+ }
+}
diff --git a/tests/consts/mod.rs b/tests/consts/mod.rs
new file mode 100644
index 0000000..5d0555d
--- /dev/null
+++ b/tests/consts/mod.rs
@@ -0,0 +1,51 @@
+#![allow(unused)]
+
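+// N1 and N2 are the two largest `u32` digit values (0xFFFF_FFFF and 0xFFFF_FFFE),
+// used below to build carry- and borrow-heavy test vectors.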
+pub const N1: u32 = -1i32 as u32;
+pub const N2: u32 = -2i32 as u32;
+
+pub const SUM_TRIPLES: &[(&[u32], &[u32], &[u32])] = &[
+ (&[], &[], &[]),
+ (&[], &[1], &[1]),
+ (&[1], &[1], &[2]),
+ (&[1], &[1, 1], &[2, 1]),
+ (&[1], &[N1], &[0, 1]),
+ (&[1], &[N1, N1], &[0, 0, 1]),
+ (&[N1, N1], &[N1, N1], &[N2, N1, 1]),
+ (&[1, 1, 1], &[N1, N1], &[0, 1, 2]),
+ (&[2, 2, 1], &[N1, N2], &[1, 1, 2]),
+ (&[1, 2, 2, 1], &[N1, N2], &[0, 1, 3, 1]),
+];
+
+pub const M: u32 = ::std::u32::MAX;
+pub const MUL_TRIPLES: &[(&[u32], &[u32], &[u32])] = &[
+ (&[], &[], &[]),
+ (&[], &[1], &[]),
+ (&[2], &[], &[]),
+ (&[1], &[1], &[1]),
+ (&[2], &[3], &[6]),
+ (&[1], &[1, 1, 1], &[1, 1, 1]),
+ (&[1, 2, 3], &[3], &[3, 6, 9]),
+ (&[1, 1, 1], &[N1], &[N1, N1, N1]),
+ (&[1, 2, 3], &[N1], &[N1, N2, N2, 2]),
+ (&[1, 2, 3, 4], &[N1], &[N1, N2, N2, N2, 3]),
+ (&[N1], &[N1], &[1, N2]),
+ (&[N1, N1], &[N1], &[1, N1, N2]),
+ (&[N1, N1, N1], &[N1], &[1, N1, N1, N2]),
+ (&[N1, N1, N1, N1], &[N1], &[1, N1, N1, N1, N2]),
+ (&[M / 2 + 1], &[2], &[0, 1]),
+ (&[0, M / 2 + 1], &[2], &[0, 0, 1]),
+ (&[1, 2], &[1, 2, 3], &[1, 4, 7, 6]),
+ (&[N1, N1], &[N1, N1, N1], &[1, 0, N1, N2, N1]),
+ (&[N1, N1, N1], &[N1, N1, N1, N1], &[1, 0, 0, N1, N2, N1, N1]),
+ (&[0, 0, 1], &[1, 2, 3], &[0, 0, 1, 2, 3]),
+ (&[0, 0, 1], &[0, 0, 0, 1], &[0, 0, 0, 0, 0, 1]),
+];
+
+pub const DIV_REM_QUADRUPLES: &[(&[u32], &[u32], &[u32], &[u32])] = &[
+ (&[1], &[2], &[], &[1]),
+ (&[3], &[2], &[1], &[1]),
+ (&[1, 1], &[2], &[M / 2 + 1], &[1]),
+ (&[1, 1, 1], &[2], &[M / 2 + 1, M / 2 + 1], &[1]),
+ (&[0, 1], &[N1], &[1], &[1]),
+ (&[N1, N1], &[N2], &[2, 1], &[3]),
+];
diff --git a/tests/macros/mod.rs b/tests/macros/mod.rs
new file mode 100644
index 0000000..b14cd57
--- /dev/null
+++ b/tests/macros/mod.rs
@@ -0,0 +1,78 @@
+#![allow(unused)]
+
+/// Assert that an op works for all val/ref combinations
+macro_rules! assert_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_eq!((&$left) $op (&$right), $expected);
+ assert_eq!((&$left) $op $right.clone(), $expected);
+ assert_eq!($left.clone() $op (&$right), $expected);
+ assert_eq!($left.clone() $op $right.clone(), $expected);
+ };
+}
+
+/// Assert that an assign-op works for all val/ref combinations
+macro_rules! assert_assign_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {{
+ let mut left = $left.clone();
+ assert_eq!({ left $op &$right; left}, $expected);
+
+ let mut left = $left.clone();
+ assert_eq!({ left $op $right.clone(); left}, $expected);
+ }};
+}
+
+/// Assert that an op works for scalar left or right
+macro_rules! assert_scalar_op {
+ (($($to:ident),*) $left:ident $op:tt $right:ident == $expected:expr) => {
+ $(
+ if let Some(left) = $left.$to() {
+ assert_op!(left $op $right == $expected);
+ }
+ if let Some(right) = $right.$to() {
+ assert_op!($left $op right == $expected);
+ }
+ )*
+ };
+}
+
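+/// `assert_scalar_op!` over the unsigned primitive conversions.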
+macro_rules! assert_unsigned_scalar_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_scalar_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128)
+ $left $op $right == $expected);
+ };
+}
+
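+/// `assert_scalar_op!` over both the unsigned and signed primitive conversions.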
+macro_rules! assert_signed_scalar_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_scalar_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128,
+ to_i8, to_i16, to_i32, to_i64, to_isize, to_i128)
+ $left $op $right == $expected);
+ };
+}
+
+/// Assert that an assign-op works for a scalar right-hand operand
+macro_rules! assert_scalar_assign_op {
+ (($($to:ident),*) $left:ident $op:tt $right:ident == $expected:expr) => {
+ $(
+ if let Some(right) = $right.$to() {
+ let mut left = $left.clone();
+ assert_eq!({ left $op right; left}, $expected);
+ }
+ )*
+ };
+}
+
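+/// `assert_scalar_assign_op!` over the unsigned primitive conversions.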
+macro_rules! assert_unsigned_scalar_assign_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_scalar_assign_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128)
+ $left $op $right == $expected);
+ };
+}
+
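+/// `assert_scalar_assign_op!` over both the unsigned and signed primitive conversions.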
+macro_rules! assert_signed_scalar_assign_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_scalar_assign_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128,
+ to_i8, to_i16, to_i32, to_i64, to_isize, to_i128)
+ $left $op $right == $expected);
+ };
+}
diff --git a/tests/modpow.rs b/tests/modpow.rs
new file mode 100644
index 0000000..276f066
--- /dev/null
+++ b/tests/modpow.rs
@@ -0,0 +1,181 @@
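+// Test vectors for the big modpow tests below: base, exponent, modulus, and the
+// expected result, written as underscore-separated 32-bit hex groups.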
+static BIG_B: &str = "\
+ efac3c0a_0de55551_fee0bfe4_67fa017a_1a898fa1_6ca57cb1\
+ ca9e3248_cacc09a9_b99d6abc_38418d0f_82ae4238_d9a68832\
+ aadec7c1_ac5fed48_7a56a71b_67ac59d5_afb28022_20d9592d\
+ 247c4efc_abbd9b75_586088ee_1dc00dc4_232a8e15_6e8191dd\
+ 675b6ae0_c80f5164_752940bc_284b7cee_885c1e10_e495345b\
+ 8fbe9cfd_e5233fe1_19459d0b_d64be53c_27de5a02_a829976b\
+ 33096862_82dad291_bd38b6a9_be396646_ddaf8039_a2573c39\
+ 1b14e8bc_2cb53e48_298c047e_d9879e9c_5a521076_f0e27df3\
+ 990e1659_d3d8205b_6443ebc0_9918ebee_6764f668_9f2b2be3\
+ b59cbc76_d76d0dfc_d737c3ec_0ccf9c00_ad0554bf_17e776ad\
+ b4edf9cc_6ce540be_76229093_5c53893b";
+
+static BIG_E: &str = "\
+ be0e6ea6_08746133_e0fbc1bf_82dba91e_e2b56231_a81888d2\
+ a833a1fc_f7ff002a_3c486a13_4f420bf3_a5435be9_1a5c8391\
+ 774d6e6c_085d8357_b0c97d4d_2bb33f7c_34c68059_f78d2541\
+ eacc8832_426f1816_d3be001e_b69f9242_51c7708e_e10efe98\
+ 449c9a4a_b55a0f23_9d797410_515da00d_3ea07970_4478a2ca\
+ c3d5043c_bd9be1b4_6dce479d_4302d344_84a939e6_0ab5ada7\
+ 12ae34b2_30cc473c_9f8ee69d_2cac5970_29f5bf18_bc8203e4\
+ f3e895a2_13c94f1e_24c73d77_e517e801_53661fdd_a2ce9e47\
+ a73dd7f8_2f2adb1e_3f136bf7_8ae5f3b8_08730de1_a4eff678\
+ e77a06d0_19a522eb_cbefba2a_9caf7736_b157c5c6_2d192591\
+ 17946850_2ddb1822_117b68a0_32f7db88";
+
+// This modulus is the prime from the 2048-bit MODP DH group:
+// https://tools.ietf.org/html/rfc3526#section-3
+static BIG_M: &str = "\
+ FFFFFFFF_FFFFFFFF_C90FDAA2_2168C234_C4C6628B_80DC1CD1\
+ 29024E08_8A67CC74_020BBEA6_3B139B22_514A0879_8E3404DD\
+ EF9519B3_CD3A431B_302B0A6D_F25F1437_4FE1356D_6D51C245\
+ E485B576_625E7EC6_F44C42E9_A637ED6B_0BFF5CB6_F406B7ED\
+ EE386BFB_5A899FA5_AE9F2411_7C4B1FE6_49286651_ECE45B3D\
+ C2007CB8_A163BF05_98DA4836_1C55D39A_69163FA8_FD24CF5F\
+ 83655D23_DCA3AD96_1C62F356_208552BB_9ED52907_7096966D\
+ 670C354E_4ABC9804_F1746C08_CA18217C_32905E46_2E36CE3B\
+ E39E772C_180E8603_9B2783A2_EC07A28F_B5C55DF0_6F4C52C9\
+ DE2BCBF6_95581718_3995497C_EA956AE5_15D22618_98FA0510\
+ 15728E5A_8AACAA68_FFFFFFFF_FFFFFFFF";
+
+static BIG_R: &str = "\
+ a1468311_6e56edc9_7a98228b_5e924776_0dd7836e_caabac13\
+ eda5373b_4752aa65_a1454850_40dc770e_30aa8675_6be7d3a8\
+ 9d3085e4_da5155cf_b451ef62_54d0da61_cf2b2c87_f495e096\
+ 055309f7_77802bbb_37271ba8_1313f1b5_075c75d1_024b6c77\
+ fdb56f17_b05bce61_e527ebfd_2ee86860_e9907066_edd526e7\
+ 93d289bf_6726b293_41b0de24_eff82424_8dfd374b_4ec59542\
+ 35ced2b2_6b195c90_10042ffb_8f58ce21_bc10ec42_64fda779\
+ d352d234_3d4eaea6_a86111ad_a37e9555_43ca78ce_2885bed7\
+ 5a30d182_f1cf6834_dc5b6e27_1a41ac34_a2e91e11_33363ff0\
+ f88a7b04_900227c9_f6e6d06b_7856b4bb_4e354d61_060db6c8\
+ 109c4735_6e7db425_7b5d74c7_0b709508";
+
+mod biguint {
+ use num_bigint::BigUint;
+ use num_integer::Integer;
+ use num_traits::Num;
+
+ fn check_modpow<T: Into<BigUint>>(b: T, e: T, m: T, r: T) {
+ let b: BigUint = b.into();
+ let e: BigUint = e.into();
+ let m: BigUint = m.into();
+ let r: BigUint = r.into();
+
+ assert_eq!(b.modpow(&e, &m), r);
+
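+ // also exercise an even modulus (2*m): the result must stay below 2*m
+ // and must still reduce to r modulo m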
+ let even_m = &m << 1;
+ let even_modpow = b.modpow(&e, &even_m);
+ assert!(even_modpow < even_m);
+ assert_eq!(even_modpow.mod_floor(&m), r);
+ }
+
+ #[test]
+ fn test_modpow_single() {
+ check_modpow::<u32>(1, 0, 11, 1);
+ check_modpow::<u32>(0, 15, 11, 0);
+ check_modpow::<u32>(3, 7, 11, 9);
+ check_modpow::<u32>(5, 117, 19, 1);
+ check_modpow::<u32>(20, 1, 2, 0);
+ check_modpow::<u32>(20, 1, 3, 2);
+ }
+
+ #[test]
+ fn test_modpow_small() {
+ for b in 0u64..11 {
+ for e in 0u64..11 {
+ for m in 1..11 {
+ check_modpow::<u64>(b, e, m, b.pow(e as u32) % m);
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn test_modpow_big() {
+ let b = BigUint::from_str_radix(super::BIG_B, 16).unwrap();
+ let e = BigUint::from_str_radix(super::BIG_E, 16).unwrap();
+ let m = BigUint::from_str_radix(super::BIG_M, 16).unwrap();
+ let r = BigUint::from_str_radix(super::BIG_R, 16).unwrap();
+
+ assert_eq!(b.modpow(&e, &m), r);
+
+ let even_m = &m << 1;
+ let even_modpow = b.modpow(&e, &even_m);
+ assert!(even_modpow < even_m);
+ assert_eq!(even_modpow % m, r);
+ }
+}
+
+mod bigint {
+ use num_bigint::BigInt;
+ use num_integer::Integer;
+ use num_traits::{Num, One, Signed};
+
+ fn check_modpow<T: Into<BigInt>>(b: T, e: T, m: T, r: T) {
+ fn check(b: &BigInt, e: &BigInt, m: &BigInt, r: &BigInt) {
+ assert_eq!(&b.modpow(e, m), r, "{} ** {} (mod {}) != {}", b, e, m, r);
+
+ let even_m = m << 1u8;
+ let even_modpow = b.modpow(e, &even_m);
+ assert!(even_modpow.abs() < even_m.abs());
+ assert_eq!(&even_modpow.mod_floor(m), r);
+
+ // the sign of the result follows the modulus like `mod_floor`, not `rem`
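+ // e.g. (-5).mod_floor(&3) == 1, whereas (-5) % 3 == -2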
+ assert_eq!(b.modpow(&BigInt::one(), m), b.mod_floor(m));
+ }
+
+ let b: BigInt = b.into();
+ let e: BigInt = e.into();
+ let m: BigInt = m.into();
+ let r: BigInt = r.into();
+
+ let neg_b_r = if e.is_odd() {
+ (-&r).mod_floor(&m)
+ } else {
+ r.clone()
+ };
+ let neg_m_r = r.mod_floor(&-&m);
+ let neg_bm_r = neg_b_r.mod_floor(&-&m);
+
+ check(&b, &e, &m, &r);
+ check(&-&b, &e, &m, &neg_b_r);
+ check(&b, &e, &-&m, &neg_m_r);
+ check(&-b, &e, &-&m, &neg_bm_r);
+ }
+
+ #[test]
+ fn test_modpow() {
+ check_modpow(1, 0, 11, 1);
+ check_modpow(0, 15, 11, 0);
+ check_modpow(3, 7, 11, 9);
+ check_modpow(5, 117, 19, 1);
+ check_modpow(-20, 1, 2, 0);
+ check_modpow(-20, 1, 3, 1);
+ }
+
+ #[test]
+ fn test_modpow_small() {
+ for b in -10i64..11 {
+ for e in 0i64..11 {
+ for m in -10..11 {
+ if m == 0 {
+ continue;
+ }
+ check_modpow(b, e, m, b.pow(e as u32).mod_floor(&m));
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn test_modpow_big() {
+ let b = BigInt::from_str_radix(super::BIG_B, 16).unwrap();
+ let e = BigInt::from_str_radix(super::BIG_E, 16).unwrap();
+ let m = BigInt::from_str_radix(super::BIG_M, 16).unwrap();
+ let r = BigInt::from_str_radix(super::BIG_R, 16).unwrap();
+
+ check_modpow(b, e, m, r);
+ }
+}
diff --git a/tests/roots.rs b/tests/roots.rs
new file mode 100644
index 0000000..cfef80c
--- /dev/null
+++ b/tests/roots.rs
@@ -0,0 +1,160 @@
+mod biguint {
+ use num_bigint::BigUint;
+ use num_traits::{One, Zero};
+ use std::{i32, u32};
+
+ fn check<T: Into<BigUint>>(x: T, n: u32) {
+ let x: BigUint = x.into();
+ let root = x.nth_root(n);
+ println!("check {}.nth_root({}) = {}", x, n, root);
+
+ if n == 2 {
+ assert_eq!(root, x.sqrt())
+ } else if n == 3 {
+ assert_eq!(root, x.cbrt())
+ }
+
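+ // floor nth root: root.pow(n) <= x < (root + 1).pow(n), and the
+ // values just below those bounds must map back to root - 1 and root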
+ let lo = root.pow(n);
+ assert!(lo <= x);
+ assert_eq!(lo.nth_root(n), root);
+ if !lo.is_zero() {
+ assert_eq!((&lo - 1u32).nth_root(n), &root - 1u32);
+ }
+
+ let hi = (&root + 1u32).pow(n);
+ assert!(hi > x);
+ assert_eq!(hi.nth_root(n), &root + 1u32);
+ assert_eq!((&hi - 1u32).nth_root(n), root);
+ }
+
+ #[test]
+ fn test_sqrt() {
+ check(99u32, 2);
+ check(100u32, 2);
+ check(120u32, 2);
+ }
+
+ #[test]
+ fn test_cbrt() {
+ check(8u32, 3);
+ check(26u32, 3);
+ }
+
+ #[test]
+ fn test_nth_root() {
+ check(0u32, 1);
+ check(10u32, 1);
+ check(100u32, 4);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_nth_root_n_is_zero() {
+ check(4u32, 0);
+ }
+
+ #[test]
+ fn test_nth_root_big() {
+ let x = BigUint::from(123_456_789_u32);
+ let expected = BigUint::from(6u32);
+
+ assert_eq!(x.nth_root(10), expected);
+ check(x, 10);
+ }
+
+ #[test]
+ fn test_nth_root_googol() {
+ let googol = BigUint::from(10u32).pow(100u32);
+
+ // perfect divisors of 100
+ for &n in &[2, 4, 5, 10, 20, 25, 50, 100] {
+ let expected = BigUint::from(10u32).pow(100u32 / n);
+ assert_eq!(googol.nth_root(n), expected);
+ check(googol.clone(), n);
+ }
+ }
+
+ #[test]
+ fn test_nth_root_twos() {
+ const EXP: u32 = 12;
+ const LOG2: usize = 1 << EXP;
+ let x = BigUint::one() << LOG2;
+
+ // the perfect divisors are just powers of two
+ for exp in 1..=EXP {
+ let n = 2u32.pow(exp);
+ let expected = BigUint::one() << (LOG2 / n as usize);
+ assert_eq!(x.nth_root(n), expected);
+ check(x.clone(), n);
+ }
+
+ // degenerate cases should return quickly
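+ // (for x = 2^4096, any n > 4096 gives a root of 1, since 2^n > x)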
+ assert!(x.nth_root(x.bits() as u32).is_one());
+ assert!(x.nth_root(i32::MAX as u32).is_one());
+ assert!(x.nth_root(u32::MAX).is_one());
+ }
+
+ #[test]
+ fn test_roots_rand1() {
+ // A random input that found regressions
+ let s = "575981506858479247661989091587544744717244516135539456183849\
+ 986593934723426343633698413178771587697273822147578889823552\
+ 182702908597782734558103025298880194023243541613924361007059\
+ 353344183590348785832467726433749431093350684849462759540710\
+ 026019022227591412417064179299354183441181373862905039254106\
+ 4781867";
+ let x: BigUint = s.parse().unwrap();
+
+ check(x.clone(), 2);
+ check(x.clone(), 3);
+ check(x.clone(), 10);
+ check(x, 100);
+ }
+}
+
+mod bigint {
+ use num_bigint::BigInt;
+ use num_traits::Signed;
+
+ fn check(x: i64, n: u32) {
+ let big_x = BigInt::from(x);
+ let res = big_x.nth_root(n);
+
+ if n == 2 {
+ assert_eq!(&res, &big_x.sqrt())
+ } else if n == 3 {
+ assert_eq!(&res, &big_x.cbrt())
+ }
+
+ if big_x.is_negative() {
+ assert!(res.pow(n) >= big_x);
+ assert!((res - 1u32).pow(n) < big_x);
+ } else {
+ assert!(res.pow(n) <= big_x);
+ assert!((res + 1u32).pow(n) > big_x);
+ }
+ }
+
+ #[test]
+ fn test_nth_root() {
+ check(-100, 3);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_nth_root_x_neg_n_even() {
+ check(-100, 4);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_sqrt_x_neg() {
+ check(-4, 2);
+ }
+
+ #[test]
+ fn test_cbrt() {
+ check(8, 3);
+ check(-8, 3);
+ }
+}