diff --git a/.github/workflows/benches.yml b/.github/workflows/benches.yml index f957eda17c..e8ac86e6b9 100644 --- a/.github/workflows/benches.yml +++ b/.github/workflows/benches.yml @@ -7,7 +7,7 @@ on: env: OCAML_VERSION: "4.14.0" - RUST_TOOLCHAIN_VERSION: "1.67" + RUST_TOOLCHAIN_VERSION: "1.71" jobs: diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 2fd96e8a07..3490171728 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -2,8 +2,6 @@ name: CI on: push: - branches: - - master pull_request: env: diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000000..5b1dec4f59 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "optimism/ethereum-optimism"] + path = optimism/ethereum-optimism + url = https://github.com/ethereum-optimism/optimism.git diff --git a/Cargo.lock b/Cargo.lock index 1eadcabdb7..77da43c308 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,6 +17,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +[[package]] +name = "adler32" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" + [[package]] name = "ahash" version = "0.7.6" @@ -28,6 +34,18 @@ dependencies = [ "version_check", ] +[[package]] +name = "ahash" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd7d5a2cecb58716e47d67d5703a249964b14c7be1ec3cad3affc295b2d1c35d" +dependencies = [ + "cfg-if 1.0.0", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" version = "1.0.2" @@ -46,6 +64,54 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "anstream" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" + +[[package]] +name = "anstyle-parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" +dependencies = [ + "anstyle", + "windows-sys", +] + [[package]] name = "ark-algebra-test-templates" version = "0.3.0" @@ -58,6 +124,17 @@ dependencies = [ "ark-std", ] +[[package]] +name = "ark-bn254" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea691771ebbb28aea556c044e2e5c5227398d840cee0c34d4d20fa8eb2689e8c" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-std", +] + [[package]] name = "ark-ec" version = "0.3.0" @@ -145,7 +222,7 @@ version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8dd4e5f0bf8285d5ed538d27fab7411f3e297908fd93c62195de8bee3f199e82" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", ] @@ -179,7 +256,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87bf87e6e8b47264efa9bde63d6225c6276a52e05e91bf37eaa8afd0032d6b71" dependencies = [ "askama_shared", - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "syn 1.0.109", ] @@ -202,7 +279,7 @@ dependencies = [ "nom", "num-traits", "percent-encoding", - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "serde", "syn 1.0.109", @@ -243,9 +320,9 @@ dependencies = [ [[package]] name = "base64" -version = "0.21.2" +version = "0.21.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" +checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" [[package]] name = "bcs" @@ -406,7 +483,7 @@ dependencies = [ "atty", "bitflags 1.3.2", "clap_derive", - "clap_lex", + "clap_lex 0.2.4", "indexmap", "once_cell", "strsim 0.10.0", @@ -414,6 +491,27 @@ dependencies = [ "textwrap 0.16.0", ] +[[package]] +name = "clap" +version = "4.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d04704f56c2cde07f43e8e2c154b43f216dc5c92fc98ada720177362f953b956" +dependencies = [ + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e231faeaca65ebd1ea3c737966bf858971cd38c3849107aa3ea7de90a804e45" +dependencies = [ + "anstream", + "anstyle", + "clap_lex 0.5.1", + "strsim 0.10.0", +] + [[package]] name = "clap_derive" version = "3.2.25" @@ -422,7 +520,7 @@ checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" dependencies = [ "heck", "proc-macro-error", - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", ] @@ -436,6 +534,18 @@ dependencies = [ "os_str_bytes", ] +[[package]] +name = "clap_lex" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" + +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + [[package]] name = "colored" version = "2.0.4" @@ -498,6 +608,15 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "core2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505" +dependencies = [ + "memchr", +] + [[package]] name = "cpufeatures" version = "0.2.9" @@ -656,7 +775,7 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "strsim 0.10.0", "syn 1.0.109", @@ -673,13 +792,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "dary_heap" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7762d17f1241643615821a8455a0b2c3e803784b058693d990b11f2dce25a0ca" + [[package]] name = "derivative" version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" 
dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", ] @@ -716,6 +841,12 @@ version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" +[[package]] +name = "elf" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6e7d85896690fe195447717af8eceae0593ac2196fd42fe88c184e904406ce" + [[package]] name = "entities" version = "1.0.1" @@ -792,9 +923,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.26" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" dependencies = [ "crc32fast", "miniz_oxide", @@ -896,7 +1027,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ - "ahash", + "ahash 0.7.6", ] [[package]] @@ -905,7 +1036,16 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.6", +] + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash 0.8.5", ] [[package]] @@ -1089,6 +1229,7 @@ dependencies = [ name = "kimchi" version = "0.1.0" dependencies = [ + "ark-bn254", "ark-ec", "ark-ff", "ark-poly", @@ -1131,7 +1272,7 @@ dependencies = [ ] [[package]] -name = "kimchi-asm" +name = "kimchi-visu" version = "0.1.0" dependencies = [ "ark-ec", @@ -1148,20 +1289,27 @@ dependencies = [ ] [[package]] -name = "kimchi-visu" +name = "kimchi_optimism" version = "0.1.0" dependencies = [ - "ark-ec", + "ark-bn254", "ark-ff", + "ark-poly", + "base64", + "clap 4.4.6", + "elf", + "groupmap", + "hex", "kimchi", + "libflate", "mina-curves", "mina-poseidon", - "o1-utils", "poly-commitment", + "regex", + "rmp-serde", "serde", "serde_json", "serde_with", - "tinytemplate", ] [[package]] @@ -1182,6 +1330,30 @@ version = "0.2.147" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +[[package]] +name = "libflate" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7d5654ae1795afc7ff76f4365c2c8791b0feb18e8996a96adad8ffd7c3b2bf" +dependencies = [ + "adler32", + "core2", + "crc32fast", + "dary_heap", + "libflate_lz77", +] + +[[package]] +name = "libflate_lz77" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be5f52fb8c451576ec6b79d3f4deb327398bc05bbdbd99021a6e77a4c855d524" +dependencies = [ + "core2", + "hashbrown 0.13.2", + "rle-decode-fast", +] + [[package]] name = "libm" version = "0.2.7" @@ -1223,9 +1395,9 @@ checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "memoffset" @@ 
-1262,7 +1434,7 @@ version = "4.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b5bc45b761bcf1b5e6e6c4128cd93b84c218721a8d9b894aa0aff4ed180174c" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", ] @@ -1469,7 +1641,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", ] @@ -1564,7 +1736,7 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b40aa99a001268b85eb18414ecd190dc21fceaeaf81214ca28233b6feb25a998" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", "synstructure", @@ -1589,7 +1761,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1894efdef5c9d83d17932c5f5db16d16eb5c8ae1a625ce44d9d1715e85d9d8dc" dependencies = [ "convert_case", - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", ] @@ -1700,9 +1872,9 @@ checksum = "b3e8cba4ec22bada7fc55ffe51e2deb6a0e0db2d0b7ab0b103acc80d2510c190" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", - "syn 2.0.25", + "syn 2.0.38", ] [[package]] @@ -1768,6 +1940,7 @@ dependencies = [ name = "poly-commitment" version = "0.1.0" dependencies = [ + "ark-bn254", "ark-ec", "ark-ff", "ark-poly", @@ -1805,7 +1978,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", "version_check", @@ -1817,7 +1990,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "version_check", ] @@ -1839,9 +2012,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.64" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ "unicode-ident", ] @@ -1907,7 +2080,7 @@ version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", ] [[package]] @@ -1997,25 +2170,25 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.1" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ "aho-corasick", "memchr", "regex-automata", - "regex-syntax 0.7.3", + "regex-syntax 0.8.2", ] [[package]] name = "regex-automata" -version = "0.3.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.3", + 
"regex-syntax 0.8.2", ] [[package]] @@ -2026,9 +2199,15 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.3" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" + +[[package]] +name = "rle-decode-fast" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" [[package]] name = "rmp" @@ -2200,9 +2379,9 @@ version = "1.0.171" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", - "syn 2.0.25", + "syn 2.0.38", ] [[package]] @@ -2233,7 +2412,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling", - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", ] @@ -2301,7 +2480,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck", - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "rustversion", "syn 1.0.109", @@ -2358,18 +2537,18 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.25" +version = "2.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e3fc8c0c74267e2df136e5e5fb656a464158aa57624053375eb9c8c6e25ae2" +checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "unicode-ident", ] @@ -2380,7 +2559,7 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", "unicode-xid 0.2.4", @@ -2489,9 +2668,9 @@ version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "463fe12d7993d3b327787537ce8dd4dfa058de32fc2b195ef3cde03dc4771e8f" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", - "syn 2.0.25", + "syn 2.0.38", ] [[package]] @@ -2640,6 +2819,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + [[package]] name = "vec_map" version = "0.8.2" @@ -2696,9 +2881,9 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", - "syn 2.0.25", + "syn 2.0.38", "wasm-bindgen-shared", ] @@ -2718,9 +2903,9 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", - "syn 2.0.25", + "syn 2.0.38", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -2887,6 +3072,26 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "zerocopy" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8db0ac2df3d060f81ec0380ccc5b71c2a7c092cfced671feeee1320e95559c87" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b6093bc6d5265ff40b479c834cdd25d8e20784781a2a29a8106327393d0a9ff" +dependencies = [ + "proc-macro2 1.0.69", + "quote 1.0.29", + "syn 2.0.38", +] + [[package]] name = "zeroize" version = "1.6.0" @@ -2902,7 +3107,7 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", - "syn 2.0.25", + "syn 2.0.38", ] diff --git a/Cargo.toml b/Cargo.toml index 473e37d933..851713bf6b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,11 +6,11 @@ members = [ "groupmap", "hasher", "kimchi", + "optimism", "poseidon", "poseidon/export_test_vectors", "poly-commitment", "signer", - "tools/kimchi-asm", "tools/kimchi-visu", "utils", "internal-tracing", diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 95d99267d6..aeccb68783 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -58,17 +58,6 @@ - [Permutation](./kimchi/permut.md) - [Lookup](./kimchi/lookup.md) -# Snarky - -- [Overview](./snarky/overview.md) -- [API](./snarky/api.md) -- [snarky wrapper](./snarky/snarky-wrapper.md) -- [Kimchi backend](./snarky/kimchi-backend.md) -- [Vars](./snarky/vars.md) -- [Booleans](./snarky/booleans.md) -- [Circuit generation](./snarky/circuit-generation.md) -- [Witness generation](./snarky/witness-generation.md) - # Pickles & Inductive Proof Systems - [Overview](./fundamentals/zkbook_ips.md) diff --git a/book/src/snarky/api.md b/book/src/snarky/api.md deleted file mode 100644 index e8b981a474..0000000000 --- a/book/src/snarky/api.md +++ /dev/null @@ -1,2 +0,0 @@ -# API of Snarky - diff --git a/book/src/snarky/booleans.md b/book/src/snarky/booleans.md deleted file mode 100644 index 7b503f0580..0000000000 --- a/book/src/snarky/booleans.md +++ /dev/null @@ -1,73 +0,0 @@ -# Booleans - -Booleans are a good example of a [snarky variable](./vars.md#snarky-vars). - -```rust -pub struct Boolean(CVar); - -impl SnarkyType for Boolean -where - F: PrimeField, -{ - type Auxiliary = (); - - type OutOfCircuit = bool; - - const SIZE_IN_FIELD_ELEMENTS: usize = 1; - - fn to_cvars(&self) -> (Vec>, Self::Auxiliary) { - (vec![self.0.clone()], ()) - } - - fn from_cvars_unsafe(cvars: Vec>, _aux: Self::Auxiliary) -> Self { - assert_eq!(cvars.len(), Self::SIZE_IN_FIELD_ELEMENTS); - Self(cvars[0].clone()) - } - - fn check(&self, cs: &mut RunState) { - // TODO: annotation? 
-        cs.assert_(Some("boolean check"), vec![BasicSnarkyConstraint::Boolean(self.0.clone())]);
-    }
-
-    fn deserialize(&self) -> (Self::OutOfCircuit, Self::Auxiliary) {
-        todo!()
-    }
-
-    fn serialize(out_of_circuit: Self::OutOfCircuit, aux: Self::Auxiliary) -> Self {
-        todo!()
-    }
-
-    fn constraint_system_auxiliary() -> Self::Auxiliary {
-        todo!()
-    }
-
-    fn value_to_field_elements(x: &Self::OutOfCircuit) -> (Vec<F>, Self::Auxiliary) {
-        todo!()
-    }
-
-    fn value_of_field_elements(x: (Vec<F>, Self::Auxiliary)) -> Self::OutOfCircuit {
-        todo!()
-    }
-}
-```
-
-## Check
-
-The `check()` function is simply constraining the `CVar` $x$ to be either $0$ or $1$ using the following constraint:
-
-$$x ( x - 1) = 0$$
-
-It is trivial to use the [double generic gate](../specs/kimchi.md#double-generic-gate) for this.
-
-## And
-
-$$x \land y = x \times y$$
-
-## Not
-
-$$\sim x = 1 - x$$
-
-## Or
-
-* $\sim x \land \sim y = b$
-* $x \lor y = \sim b$
diff --git a/book/src/snarky/circuit-generation.md b/book/src/snarky/circuit-generation.md
deleted file mode 100644
index e81793aa03..0000000000
--- a/book/src/snarky/circuit-generation.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Circuit generation
-
-In circuit generation mode, the `has_witness` field of `RunState` is set to the default `CircuitGeneration`, and the program of the user is ran to completion.
-
-During the execution, the different snarky functions called on `RunState` will create [internal variables](./vars.md) as well as constraints.
-
-## Creation of variables
-
-[Variables](./vars.md) can be created via the `compute()` function, which takes two arguments:
-
-* A `TypeCreation` toggle, which is either set to `Checked` or `Unsafe`. We will describe this below.
-* A closure representing the actual computation behind the variable. This computation will only take place when real values are computed, and can be non-deterministic (e.g. random, or external values provided by the user). Note that the closure takes one argument: a `WitnessGeneration`, a structure that allows you to read the runtime values of any variables that were previously created in your program.
-
-The `compute()` function also needs a type hint to understand what type of [snarky type](./vars.md#snarky-vars) it is creating.
-
-It then performs the following steps:
-
-* creates enough [`CVar`](./vars#circuit-vars) to hold the value to be created
-* retrieves the auxiliary data needed to create the snarky type (TODO: explain auxiliary data) and create the [`snarky variable`](./vars.md#snarky-vars) out of the `CVar`s and the auxiliary data
-* if the `TypeCreation` is set to `Checked`, call the `check()` function on the snarky type (which will constrain the value created), if it is set to `Unsafe` do nothing (in which case we're trusting that the value cannot be malformed, this is mostly used internally and it is highly-likely that users directly making use of `Unsafe` are writing bugs)
-
-```admonish
-At this point we only created variables to hold future values, and made sure that they are constrained.
-The actual values will fill the room created by the `CVar` only during the [witness generation](./witness-generation.md).
-```
-
-## Constraints
-
-All other functions exposed by the API are basically here to operate on variables and create constraints in doing so.
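The `compute()` flow that the removed page above describes can be illustrated with a small self-contained sketch. All names below (`RunState`, `TypeCreation`, `compute_boolean`) are hypothetical stand-ins reconstructed from the prose, not the actual snarky-rs API:

```rust
// Toy model of circuit generation: allocate variables first, constrain them
// only when `TypeCreation::Checked` is requested. Field arithmetic is elided.
#[derive(PartialEq)]
enum TypeCreation {
    Checked,
    Unsafe,
}

#[derive(Default)]
struct RunState {
    next_var: usize,
    constraints: Vec<String>,
}

impl RunState {
    // Step 1: create a fresh variable to hold a value that will only be
    // computed later, during witness generation.
    fn alloc(&mut self) -> usize {
        let var = self.next_var;
        self.next_var += 1;
        var
    }

    // Steps 2-3: build the snarky type from the fresh variable, then apply
    // its `check()` logic unless the caller opted out with `Unsafe`.
    fn compute_boolean(&mut self, mode: TypeCreation) -> usize {
        let var = self.alloc();
        if mode == TypeCreation::Checked {
            // check() for a boolean constrains x * (x - 1) = 0.
            self.constraints.push(format!("v{var} * (v{var} - 1) = 0"));
        }
        var
    }
}

fn main() {
    let mut state = RunState::default();
    let b = state.compute_boolean(TypeCreation::Checked);
    println!("allocated v{b}; constraints: {:?}", state.constraints);
}
```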
diff --git a/book/src/snarky/kimchi-backend.md b/book/src/snarky/kimchi-backend.md
deleted file mode 100644
index 2d2ebf789a..0000000000
--- a/book/src/snarky/kimchi-backend.md
+++ /dev/null
@@ -1,234 +0,0 @@
-# Kimchi Backend
-
-![](https://i.imgur.com/KmKU5Pl.jpg)
-
-Underneath the snarky wrapper (in `snarky/checked_runner.rs`) lies what we used to call the `plonk_constraint_system` or `kimchi_backend` in `snarky/constraint_systen.rs`.
-
-```admonish
-It is good to note that we're planning on removing this abstract separation between the snarky wrapper and the constraint system.
-```
-
-The logic in the kimchi backend serves two purposes:
-
-* **Circuit generation**. It is the logic that adds gates to our list of gates (representing the circuit). For most of these gates, the variables used are passed to the backend by the snarky wrapper, but some of them are created by the backend itself (see more in the [variables section](#variables)).
-* **Witness generation**. It is the logic that creates the witness
-
-One can also perform two additional operations once the constraint system has been compiled:
-
-* Generate the prover and verifier index for the system.
-* Get a hash of the constraint system (this includes the circuit, the number of public input) (TODO: verify that this is true) (TODO: what else should be in that hash? a version of snarky and a version of kimchi?).
-
-## A circuit
-
-A circuit is either being built, or has been contructed during a circuit generation phase:
-
-```rust
-enum Circuit<F>
-where
-    F: PrimeField,
-{
-    /** A circuit still being written. */
-    Unfinalized(Vec<BasicSnarkyConstraint<F>>),
-    /** Once finalized, a circuit is represented as a digest
-        and a list of gates that corresponds to the circuit.
-    */
-    Compiled([u8; 32], Vec<CircuitGate<F>>),
-}
-```
-
-## State
-
-The state of the kimchi backend looks like this:
-
-```rust
-pub struct SnarkyConstraintSystem<Field>
-where
-    Field: PrimeField,
-{
-    /// A counter used to track variables
-    /// (similar to the one in the snarky wrapper)
-    next_internal_var: usize,
-
-    /// Instruction on how to compute each internal variable
-    /// (as a linear combination of other variables).
-    /// Used during witness generation.
-    internal_vars: HashMap<InternalVar, (Vec<(Field, V)>, Option<Field>)>,
-
-    /// The symbolic execution trace table.
-    /// Each cell is a variable that takes a value during witness generation.
-    /// (if not set, it will take the value 0).
-    rows: Vec<Vec<Option<V>>>,
-
-    /// The circuit once compiled
-    gates: Circuit<Field>,
-
-    /// The row to use the next time we add a constraint.
-    // TODO: I think we can delete this
-    next_row: usize,
-
-    /// The size of the public input
-    /// (which fills the first rows of our constraint system.
-    public_input_size: Option<usize>,
-
-    // omitted values...
-}
-```
-
-## Variables
-
-In the backend, there's two types of variables:
-
-```rust
-enum V {
-    /// An external variable
-    /// (generated by snarky, via [exists]).
-    External(usize),
-
-    /// An internal variable is generated to hold an intermediate value,
-    /// (e.g. in reducing linear combinations to single PLONK positions).
-    Internal(InternalVar),
-}
-```
-
-Internal variables are basically a `usize` pointing to a hashmap in the state.
-
-That hashmap tells you how to compute the internal variable during witness generation: it is always a linear combination of other variables (and a constant).
-
-## Circuit generation
-
-During circuit generation, the snarky wrapper will make calls to the `add_constraint()` or `add_basic_snarky_constraint` function of the kimchi backend, specifying what gate to use and what variables to use in that gate.
- -At this point, the snarky wrapper might have some variables that are not yet tracked as such (with a counter). -Rather, they are constants, or they are a combination of other variables. -You can see that as a small AST representing how to compute a variable. -(See the [variables section](./vars.md#circuit-vars) for more details). - -For this reason, they can hide a number of operations that haven't been constrained yet. -It is the role of the `add_constrain` logic to enforce that at this point constants, as well as linear combinations or scalings of variables, are encoded in the circuit. -This is done by adding enough generic gates (using the `reduce_lincom()` or `reduce_to_var()` functions). - -```admonish -This is a remnant of an optimization targetting R1CS (in which additions are for free). -An issue with this approach is the following: imagine that two circuit variables are created from the same circuit variable, imagine also that the original circuit variable contained a long AST, then both variables might end up creating the same constraints to convert that AST. -Currently, snarkyjs and pickles expose a `seal()` function that allows you to reduce this issue, at the cost of some manual work and mental tracking on the developer. -We should probably get rid of this, while making sure that we can continue to optimize generic gates -(in some cases you can merge two generic gates in one (TODO: give an example of where that can happen)). -Another solution is to keep track of what was reduced, and reuse previous reductions (similar to how we handle constants). -``` - -It is during this "reducing" step that internal variables (known only to the kimchi backend) are created. - -```admonish -The process is quite safe, as the kimchi backend cannot use the snarky wrapper variables directly (which are of type `CVar`). -Since the expected format (see the [variables section](#variables) is a number (of type `usize`), the only way to convert a non-tracked variable (constant, or scale, or linear combination) is to reduce it (and in the process constraining its value). -``` - -Depending on the gate being used, several constraints might be added via the `add_row()` function which does three things: - -1. figure out if there's any wiring to be done -2. add a gate to our list of gates (representing the circuit) -3. add the variables to our _symbolic_ execution trace table (symbolic in the sense that nothing has values yet) - -This process happens as the circuit is "parsed" and the constraint functions of the kimchi backend are called. - -This does not lead to a finalized circuit, see the next section to see how that is done. - -(TODO: ideally this should happen in the same step) - -## Finalization of the circuit. - -So far we've only talked about adding specific constraints to the circuit, but not about how public input are handled. - -The `finalization()` function of the kimchi backend does the following: - -* add as many generic rows as there are public inputs. -* construct the permutation -* computes a cache of the circuit (TODO: this is so unecessary) -* and other things that are not that important - -## Witness generation - -Witness generation happens by taking the finalized state (in the `compute_witness()` function) with a callback that can be used to retrieve the values of external variables (public input and public output). - -The algorithm follows these steps using the symbolic execution table we built during circuit generation: - -1. it initializes the execution trace table with zeros -2. 
go through the rows related to the public input and set the most-left column values to the ones obtained by the callback.
-3. go through the other rows and compute the value of the variables left in the table
-
-Variables in step 3. should either:
-
-* be absent (`None`) and evaluated to the default value 0
-* point to an external variable, in which case the closure passed can be used to retrieve the value
-* be an internal variable, in which case the value is computed by evaluating the AST that was used to create it.
-
-## Permutation
-
-The permutation is used to wire cells of the execution trace table (specifically, cells belonging to the first 7 columns).
-It is also known as "copy constraints".
-
-```admonish
-In snarky, the permutation is represented differently from kimchi, and thus needs to be converted to the kimchi's format before a proof can be created.
-TODO: merge the representations
-```
-
-We use the permutation in ingenious ways to optimize circuits.
-For example, we use it to encode each constants once, and wire it to places where it is used.
-Another example, is that we use it to assert equality between two cells.
-
-## Implementation details
-
-There's two aspect of the implementation of the permutation, the first one is a hashmap of equivalence classes, which is used to track all the positions of a variable, the second one is making use of a [union find]() data structure to link variables that are equivalent (we'll talk about that after).
-
-The two data structures are in the kimchi backend's state:
-
-```rust
-pub struct SnarkyConstraintSystem<Field>
-where
-    Field: PrimeField,
-{
-    equivalence_classes: HashMap<V, Vec<Position<Row>>>,
-    union_finds: disjoint_set::DisjointSet<V>,
-    // omitted fields...
-}
-```
-
-### equivalence classes
-
-As said previously, during circuit generation a symbolic execution trace table is created. It should look a bit like this (if there were only 3 columns and 4 rows):
-
-| | 0 | 1 | 2 |
-| :-: | :-: | :-: | :-:|
-| 0 | v1 | v1 | |
-| 1 | | v2 | |
-| 2 | | v2 | |
-| 3 | | | v1 |
-
-From that, it should be clear that all the cells containing the variable `v1` should be connected,
-and all the cells containing the variable `v2` should be as well.
-
-The format that the permutation expects is a [cycle](https://en.wikipedia.org/wiki/Cyclic_permutation): a list of cells where each cell is linked to the next, the last one wrapping around and linking to the first one.
-
-For example, a cycle for the `v1` variable could be:
-
-```
-(0, 0) -> (0, 1)
-(0, 1) -> (3, 2)
-(3, 2) -> (0, 0)
-```
-
-During circuit generation, a hashmap (called `equivalence_classes`) is used to track all the positions (row and column) of each variable.
-
-During finalization, all the different cycles are created by looking at all the variables existing in the hashmap.
-
-### Union finds
-
-Sometimes, we know that two variables will have equivalent values due to an `assert_equal()` being called to link them.
-Since we link two variables together, they need to be part of the same cycle, and as such we need to be able to detect that to construct correct cycles.
-
-To do this, we use a [union find]() data structure, which allows us to easily find the unions of equivalent variables.
-
-When an `assert_equal()` is called, we link the two variables together using the `union_finds` data structure.
-
-During finalization, when we create the cycles, we use the `union_finds` data structure to find the equivalent variables.
-We then create a new equivalence classes hashmap to merge the keys (variables) that are in the same set.
-This is done before using the equivalence classes hashmap to construct the cycles.
diff --git a/book/src/snarky/overview.md b/book/src/snarky/overview.md
deleted file mode 100644
index b67c1fa30b..0000000000
--- a/book/src/snarky/overview.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# Snarky
-
-Snarky is a frontend to the [kimchi proof system](../kimchi/overview.md).
-
-It allows users to write circuits that can be proven using kimchi.
-
-This part of the Mina book documents both how to use snarky, and how its internals work.
-
-```admonish
-Snarky was originally an OCaml library. It also is known as a typescript library: SnarkyJS.
-This documentation talks about the Rust implementation, which one can refer to as snarky-rs (but we will just say snarky from now on).
-```
-
-## High-level design
-
-Snarky is divided into two parts:
-
-* **Circuit-generation**: which is also called the setup or compilation phase. It is when snarky turn code written using its library, to a circuit that kimchi can understand. This can later be used by kimchi to produce prover and verifier keys.
-* **Witness-generation**: which is also called the proving, or runtime phase. It is when snarky executes the written program and records its state at various point in time to create an execution trace of the program (which we call witness here). This can later be used by kimchi, with a proving key, to produce a zero-knowledge proof.
-
-A snarky program is constructed using functions exposed by the library.
-The API of snarky that one can use to design circuits can be split in three categories:
-
-* creation of snarky variables (via `compute()`)
-* creation of constraints (via `assert` type-functions)
-* manipulation of snarky variables (which can sometimes create constraints)
-
-Snarky itself is divided into three parts:
-
-* [The high-level API](./api.md) that you can find in `api.rs` and `traits.rs`
-* [The snarky wrapper](./snarky-wrapper.md), which contains the logic for creating user variables and composed types (see the section on [Snarky vars](./vars.md#snarky-vars)).
-* [The kimchi backend](./kimchi-backend.md), which contains the logic for constructing the circuit as well as the witness.
diff --git a/book/src/snarky/snarky-wrapper.md b/book/src/snarky/snarky-wrapper.md
deleted file mode 100644
index 725f7c35ec..0000000000
--- a/book/src/snarky/snarky-wrapper.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# Snarky wrapper
-
-Snarky, as of today, is constructed as two parts:
-
-* a snarky wrapper, which is explained in this document
-* a backend underneath that wrapper, explained in the [kimchi backend section](./kimchi-backend.md)
-
-```admonish
-This separation exists for legacy reasons, and ideally we should merge the two into a single library.
-```
-
-The snarky wrapper mostly exists in `checked_runner.rs`, and has the following state:
-
-```rust
-pub struct RunState<F>
-where
-    F: PrimeField,
-{
-    /// The constraint system used to build the circuit.
-    /// If not set, the constraint system is not built.
-    system: Option<SnarkyConstraintSystem<F>>,
-
-    /// The public input of the circuit used in witness generation.
-    // TODO: can we merge public_input and private_input?
-    public_input: Vec<F>,
-
-    // TODO: we could also just store `usize` here
-    pub(crate) public_output: Vec<CVar<F>>,
-
-    /// The private input of the circuit used in witness generation. Still not sure what that is, or why we care about this.
-    private_input: Vec<F>,
-
-    /// If set, the witness generation will check if the constraints are satisfied.
-    /// This is useful to simulate running the circuit and return an error if an assertion fails.
-    eval_constraints: bool,
-
-    /// The number of public inputs.
-    num_public_inputs: usize,
-
-    /// A counter used to track variables (this includes public inputs) as they're being created.
-    next_var: usize,
-
-    /// Indication that we're running the witness generation (as opposed to the circuit creation).
-    mode: Mode,
-}
-```
-
-The wrapper is designed to be used in different ways, depending on the fields set.
-
-```admonish
-Ideally, we would like to only run this once and obtain a result that's an immutable compiled artifact.
-Currently, `public_input`, `private_input`, `eval_constriants`, `next_var`, and `mode` all need to be mutable.
-In the future these should be passed as arguments to functions, and should not exist in the state.
-```
-
-## Public output
-
-The support for public output is implemented as kind of a hack.
-
-When the developer writes a circuit, they have to specify the type of the public output.
-
-This allows the API to save enough room at the end of the public input, and store the variables used in the public output in the state.
-
-When the API calls the circuit written by the developer, it expects the public output (as a snarky type) to be returned by the function.
-The compilation or proving API that ends up calling that function, can thus obtain the variables of the public output.
-With that in hand, the API can continue to write the circuit to enforce an equality constraint between these variables being returned and the public output variable that it had previously stored in the state.
-
-Essentially, the kimchi backend will turn this into as many wiring as there are `CVar` in the public output.
-
-During witness generation, we need a way to modify the witness once we know the values of the public output.
-As the public output `CVar`s were generated from the snarky wrapper (and not from the kimchi backend), the snarky wrapper should know their values after running the given circuit.
diff --git a/book/src/snarky/vars.md b/book/src/snarky/vars.md
deleted file mode 100644
index 7a1e3a3be7..0000000000
--- a/book/src/snarky/vars.md
+++ /dev/null
@@ -1,135 +0,0 @@
-# Vars
-
-In this section we will introduce two types of variables:
-
-* Circuit vars, or `CVar`s, which are low-level variables representing field elements.
-* Snarky vars, which are high-level variables that user can use to create more meaningful programs.
-
-## Circuit vars
-
-In snarky, we first define circuit variables (TODO: rename Field variable?) which represent field elements in a circuit.
-These circuit variables, or cvars, can be represented differently in the system:
-
-```rust
-pub enum CVar<F>
-where
-    F: PrimeField,
-{
-    /// A constant.
-    Constant(F),
-
-    /// A variable that can be refered to via a `usize`.
-    Var(usize),
-
-    /// The addition of two other [CVar]s.
-    Add(Box<CVar<F>>, Box<CVar<F>>),
-
-    /// Scaling of a [CVar].
-    Scale(F, Box<CVar<F>>),
-}
-```
-
-One can see a CVar as an AST, where two atoms exist: a `Var(usize)` which represents a private input, an a `Constant(F)` which represents a constant.
-Anything else represents combinations of these two atoms.
-
-### Constants
-
-Note that a circuit variable does not represent a value that has been constrained in the circuit (yet).
-This is why we need to know if a cvar is a constant, so that we can avoid constraining it too early.
-
-For example, the following code does not encode 2 or 1 in the circuit, but will encode 3:
-
-```rust
-let x: CVar<F> = state.exists(|_| 2) + state.exists(|_| 3);
-state.assert_eq(x, y); // 3 and y will be encoded in the circuit
-```
-
-whereas the following code will encode all variables:
-
-```rust
-let x = y + y;
-let one: CVar<F> = state.exists(|_| 1);
-assert_eq(x, one);
-```
-
-### Non-constants
-
-Right after being created, a `CVar` is not constrained yet, and needs to be constrained by the application.
-That is unless the application wants the `CVar` to be a constant that will not need to be constrained (see previous example) or because the application wants the `CVar` to be a random value (unlikely) (TODO: we should add a "rand" function for that).
-
-In any case, a circuit variable which is not a constant has a value that is not known yet at circuit-generation time.
-In some situations, we might not want to constrain the
-
-
-### When do variables get constrained?
-
-In general, a circuit variable only gets constrained by an assertion call like `assert` or `assert_equals`.
-
-When variables are added together, or scaled, they do not directly get constrained.
-This is due to optimizations targetting R1CS (which we don't support anymore) that were implemented in the original snarky library, and that we have kept in snarky-rs.
-
-Imagine the following example:
-
-```rust
-let y = x1 + x2 + x3 +.... ;
-let z = y + 3;
-assert_eq(y, 6);
-assert_eq(z, 7);
-```
-
-The first two lines will not create constraints, but simply create minimal ASTs that track all of the additions.
-
-Both assert calls will then reduce the variables to a single circuit variable, creating the same constraints twice.
-
-For this reason, there's a function `seal()` defined in pickles and snarkyjs. (TODO: more about `seal()`, and why is it not in snarky?) (TODO: remove the R1CS optimization)
-
-## Snarky vars
-
-Handling `CVar`s can be cumbersome, as they can only represent a single field element.
-We might want to represent values that are either in a smaller range (e.g. [booleans](./booleans.md)) or that are made out of several `CVar`s.
-
-For this, snarky's API exposes the following trait, which allows users to define their own types:
-
-```rust
-pub trait SnarkyType<F>: Sized
-where
-    F: PrimeField,
-{
-    /// ?
-    type Auxiliary;
-
-    /// The equivalent type outside of the circuit.
-    type OutOfCircuit;
-
-    const SIZE_IN_FIELD_ELEMENTS: usize;
-
-    fn to_cvars(&self) -> (Vec<CVar<F>>, Self::Auxiliary);
-
-    fn from_cvars_unsafe(cvars: Vec<CVar<F>>, aux: Self::Auxiliary) -> Self;
-
-    fn check(&self, cs: &mut RunState<F>);
-
-    fn deserialize(&self) -> (Self::OutOfCircuit, Self::Auxiliary);
-
-    fn serialize(out_of_circuit: Self::OutOfCircuit, aux: Self::Auxiliary) -> Self;
-
-    fn constraint_system_auxiliary() -> Self::Auxiliary;
-
-    fn value_to_field_elements(x: &Self::OutOfCircuit) -> (Vec<F>, Self::Auxiliary);
-
-    fn value_of_field_elements(x: (Vec<F>, Self::Auxiliary)) -> Self::OutOfCircuit;
-}
-```
-
-Such types are always handled as `OutOfCircuit` types (e.g. `bool`) by the users, and as a type implementing `SnarkyType` by snarky (e.g. [`Boolean`](./booleans.md)).
-Thus, the user can pass them to snarky in two ways:
-
-**As public inputs**. In this case they will be serialized into field elements for snarky before [witness-generation](./witness-generation.md) (via the `value_to_field_elements()` function)
-
-**As private inputs**.
In this case, they must be created using the `compute()` function with a closure returning an `OutOfCircuit` value by the user.
-The call to `compute()` will need to have some type hint, for snarky to understand what `SnarkyType` it is creating.
-This is because the relationship is currently only one-way: a `SnarkyType` knows what out-of-circuit type it relates to, but not the other way is not true.
-(TODO: should we implement that though?)
-
-A `SnarkyType` always implements a `check()` function, which is called by snarky when `compute()` is called to create such a type.
-The `check()` function is responsible for creating the constraints that sanitize the newly-created `SnarkyType` (and its underlying `CVar`s).
-For example, creating a boolean would make sure that the underlying `CVar` is either 0 or 1.
diff --git a/book/src/snarky/witness-generation.md b/book/src/snarky/witness-generation.md
deleted file mode 100644
index 41fbc3b5f1..0000000000
--- a/book/src/snarky/witness-generation.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Witness generation
-
-In snarky, currently, the same code is run through again to generate the witness.
-
-That is, the `RunState` contains a few changes:
-
-* **`public_input: Vec<F>`**: now contains concrete values (instead of being empty).
-* **`has_witness`**: is set to `WitnessGeneration`.
-
-Additionaly, if we want to verify that the arguments are actually correct (and that the program implemented does not fail) we can also set `eval_constraints` to `true` (defaults to `false`) to verify that the program has a correct state at all point in time.
-
-If we do not do this, the user will only detect failure during proof generation (specifically when the [composition polynomial](../specs/kimchi.md#proof-creation) is divided by the [vanishing polynomial](../specs/kimchi.md#proof-creation)).
-
-```admonish
-This is implemented by simply checking that each [generic gate](../specs/kimchi.md#double-generic-gate) encountered is correct, in relation to the witness values observed in that row.
-In other words $c_0 l + c_1 r + c_2 o + c_3 l r + c_4 = 0$ (extrapolated to the [double generic gate](../specs/kimchi.md#double-generic-gate)).
-Note that other custom gates are not checked, as they are wrapped by [gadgets](../specs/kimchi.md#gates) which fill in witness values instead of the user.
-Thus there is no room for user error (i.e. the user entering a wrong private input).
-```
-
-Due to the `has_witness` variable set to `WitnessGeneration`, functions will behave differently and compute actual values instead of generating constraints.
diff --git a/book/src/specs/kimchi.md b/book/src/specs/kimchi.md
index 6651ee0439..b079c012d1 100644
--- a/book/src/specs/kimchi.md
+++ b/book/src/specs/kimchi.md
@@ -310,10 +310,11 @@ z_2 = &\ (w_0(g^i) + \sigma_0 \cdot beta + \gamma) \cdot \\
 \end{align}
 $$

-If computed correctly, we should have $z(g^{n-3}) = 1$.
+We randomize the evaluations at `n - zk_rows + 1` and `n - zk_rows + 2` in order to add
+zero-knowledge to the protocol.
+
+For a valid witness, we then have $z(g^{n-zk_rows}) = 1$.

-Finally, randomize the last `EVAL_POINTS` evaluations $z(g^{n-2})$ and $z(g^{n-1})$,
-in order to add zero-knowledge to the protocol.

 ### Lookup
@@ -1607,11 +1608,34 @@ def sample(domain, i):
 The compilation steps to create the common index are as follows:

 1. If the circuit is less than 2 gates, abort.
-2. Create a domain for the circuit. That is,
+1. Compute the number of zero-knowledge rows (`zk_rows`) that will be required to
+   achieve zero-knowledge.
The following constraints apply to `zk_rows`:
+   * The number of chunks `c` results in an evaluation at `zeta` and `zeta * omega` in
+     each column for `2*c` evaluations per column, so `zk_rows >= 2*c + 1`.
+   * The permutation argument interacts with the `c` chunks in parallel, so it is
+     possible to cross-correlate between them to compromise zero knowledge. We know
+     that there is some `c >= 1` such that `zk_rows = 2*c + k` from the above. Thus,
+     attempting to find the evaluation at a new point, we find that:
+     * the evaluation of every witness column in the permutation contains `k` unknowns;
+     * the evaluations of the permutation argument aggregation have `k-1` unknowns;
+     * the permutation argument applies on all but `zk_rows - 3` rows;
+     * and thus we form the equation `zk_rows - 3 < 7 * k + (k - 1)` to ensure that we
+       can construct fewer equations than we have unknowns.
+
+   This simplifies to `k > (2 * c - 2) / 7`, giving `zk_rows > (16 * c - 2) / 7`.
+   We can derive `c` from the `max_poly_size` supported by the URS, and thus we find
+   `zk_rows` and `domain_size` satisfying the fixpoint
+
+   ```text
+   zk_rows = (16 * (domain_size / max_poly_size) + 5) / 7
+   domain_size = circuit_size + zk_rows
+   ```
+
+1. Create a domain for the circuit. That is,
    compute the smallest subgroup of the field that
-   has order greater or equal to `n + ZK_ROWS` elements.
-3. Pad the circuit: add zero gates to reach the domain size.
-4. sample the `PERMUTS` shifts.
+   has order greater than or equal to `n + zk_rows` elements.
+1. Pad the circuit: add zero gates to reach the domain size.
+1. Sample the `PERMUTS` shifts.

 ### Lookup Index
@@ -1689,14 +1713,14 @@ Both the prover and the verifier index, besides the common parts described above

 These pre-computations are optimizations, in the context of normal proofs, but they are necessary for recursion.

 ```rs
-pub struct ProverIndex<G: KimchiCurve> {
+pub struct ProverIndex<G: KimchiCurve, OpeningProof: OpenProof<G>> {
     /// constraints system polynomials
     #[serde(bound = "ConstraintSystem<G::ScalarField>: Serialize + DeserializeOwned")]
     pub cs: ConstraintSystem<G::ScalarField>,

     /// The symbolic linearization of our circuit, which can compile to concrete types once certain values are learned in the protocol.
    #[serde(skip)]
-    pub linearization: Linearization<Vec<PolishToken<G::ScalarField>>>,
+    pub linearization: Linearization<Vec<PolishToken<G::ScalarField>>, Column>,

     /// The mapping between powers of alpha and constraints
     #[serde(skip)]
@@ -1704,7 +1728,8 @@ pub struct ProverIndex<G: KimchiCurve> {

     /// polynomial commitment keys
     #[serde(skip)]
-    pub srs: Arc<SRS<G>>,
+    #[serde(bound(deserialize = "OpeningProof::SRS: Default"))]
+    pub srs: Arc<OpeningProof::SRS>,

     /// maximal size of polynomial section
     pub max_poly_size: usize,
@@ -1714,7 +1739,7 @@ pub struct ProverIndex<G: KimchiCurve> {

     /// The verifier index corresponding to this prover index
     #[serde(skip)]
-    pub verifier_index: Option<VerifierIndex<G>>,
+    pub verifier_index: Option<VerifierIndex<G, OpeningProof>>,

     /// The verifier index digest corresponding to this prover index
     #[serde_as(as = "Option<o1_utils::serialization::SerdeAs>")]
@@ -1752,15 +1777,18 @@ pub struct LookupVerifierIndex<G: CommitmentCurve> {

 #[serde_as]
 #[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct VerifierIndex<G: KimchiCurve> {
+pub struct VerifierIndex<G: KimchiCurve, OpeningProof: OpenProof<G>> {
     /// evaluation domain
     #[serde_as(as = "o1_utils::serialization::SerdeAs")]
     pub domain: D<G::ScalarField>,
     /// maximal size of polynomial section
     pub max_poly_size: usize,
+    /// the number of randomized rows to achieve zero knowledge
+    pub zk_rows: u64,
     /// polynomial commitment keys
     #[serde(skip)]
-    pub srs: OnceCell<Arc<SRS<G>>>,
+    #[serde(bound(deserialize = "OpeningProof::SRS: Default"))]
+    pub srs: Arc<OpeningProof::SRS>,
     /// number of public inputs
     pub public: usize,
     /// number of previous evaluation challenges, for recursive proving
@@ -1825,7 +1853,7 @@ pub struct VerifierIndex<G: KimchiCurve> {
     pub shift: [G::ScalarField; PERMUTS],
     /// zero-knowledge polynomial
     #[serde(skip)]
-    pub zkpm: OnceCell<DensePolynomial<G::ScalarField>>,
+    pub permutation_vanishing_polynomial_m: OnceCell<DensePolynomial<G::ScalarField>>,
     // TODO(mimoo): isn't this redundant with domain.d1.group_gen ?
     /// domain offset for zero-knowledge
     #[serde(skip)]
@@ -1838,7 +1866,7 @@ pub struct VerifierIndex<G: KimchiCurve> {
     pub lookup_index: Option<LookupVerifierIndex<G>>,

     #[serde(skip)]
-    pub linearization: Linearization<Vec<PolishToken<G::ScalarField>>>,
+    pub linearization: Linearization<Vec<PolishToken<G::ScalarField>>, Column>,
     /// The mapping between powers of alpha and constraints
     #[serde(skip)]
     pub powers_of_alpha: Alphas<G::ScalarField>,
@@ -1939,22 +1967,6 @@ pub struct PointEvaluations<Evals> {
     pub zeta_omega: Evals,
 }

-/// Evaluations of lookup polynomials
-#[serde_as]
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct LookupEvaluations<Evals> {
-    /// sorted lookup table polynomial
-    pub sorted: Vec<Evals>,
-    /// lookup aggregation polynomial
-    pub aggreg: Evals,
-    // TODO: May be possible to optimize this away?
-    /// lookup table polynomial
-    pub table: Evals,
-
-    /// Optionally, a runtime table polynomial.
-    pub runtime: Option<Evals>,
-}
-
 // TODO: this should really be vectors here, perhaps create another type for chunked evaluations?
 /// Polynomial evaluations contained in a `ProverProof`.
 /// - **Chunked evaluations** `Field` is instantiated with vectors with a length that equals the length of the chunk
 #[serde_as]
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct ProofEvaluations<Evals> {
+    /// public input polynomials
+    pub public: Option<Evals>,
     /// witness polynomials
     pub w: [Evals; COLUMNS],
     /// permutation polynomial
@@ -1971,12 +1985,54 @@ pub struct ProofEvaluations<Evals> {
     pub z: Evals,
     /// permutation squared?
     pub s: [Evals; PERMUTS - 1],
     /// coefficient polynomials
     pub coefficients: [Evals; COLUMNS],
-    /// lookup-related evaluations
-    pub lookup: Option<LookupEvaluations<Evals>>,
     /// evaluation of the generic selector polynomial
     pub generic_selector: Evals,
     /// evaluation of the poseidon selector polynomial
     pub poseidon_selector: Evals,
+    /// evaluation of the elliptic curve addition selector polynomial
+    pub complete_add_selector: Evals,
+    /// evaluation of the elliptic curve variable base scalar multiplication selector polynomial
+    pub mul_selector: Evals,
+    /// evaluation of the endoscalar multiplication selector polynomial
+    pub emul_selector: Evals,
+    /// evaluation of the endoscalar multiplication scalar computation selector polynomial
+    pub endomul_scalar_selector: Evals,
+
+    // Optional gates
+    /// evaluation of the RangeCheck0 selector polynomial
+    pub range_check0_selector: Option<Evals>,
+    /// evaluation of the RangeCheck1 selector polynomial
+    pub range_check1_selector: Option<Evals>,
+    /// evaluation of the ForeignFieldAdd selector polynomial
+    pub foreign_field_add_selector: Option<Evals>,
+    /// evaluation of the ForeignFieldMul selector polynomial
+    pub foreign_field_mul_selector: Option<Evals>,
+    /// evaluation of the Xor selector polynomial
+    pub xor_selector: Option<Evals>,
+    /// evaluation of the Rot selector polynomial
+    pub rot_selector: Option<Evals>,
+
+    // lookup-related evaluations
+    /// evaluation of lookup aggregation polynomial
+    pub lookup_aggregation: Option<Evals>,
+    /// evaluation of lookup table polynomial
+    pub lookup_table: Option<Evals>,
+    /// evaluation of lookup sorted polynomials
+    pub lookup_sorted: [Option<Evals>; 5],
+    /// evaluation of runtime lookup table polynomial
+    pub runtime_lookup_table: Option<Evals>,
+
+    // lookup selectors
+    /// evaluation of the runtime lookup table selector polynomial
+    pub runtime_lookup_table_selector: Option<Evals>,
+    /// evaluation of the Xor range check pattern selector polynomial
+    pub xor_lookup_selector: Option<Evals>,
+    /// evaluation of the Lookup range check pattern selector polynomial
+    pub lookup_gate_lookup_selector: Option<Evals>,
+    /// evaluation of the RangeCheck range check pattern selector polynomial
+    pub range_check_lookup_selector: Option<Evals>,
+    /// evaluation of the ForeignFieldMul range check pattern selector polynomial
+    pub foreign_field_mul_lookup_selector: Option<Evals>,
 }

 /// Commitments linked to the lookup feature
@@ -2011,12 +2067,16 @@ pub struct ProverCommitments<G: AffineCurve> {
 #[serde_as]
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")]
-pub struct ProverProof<G: AffineCurve> {
+pub struct ProverProof<G: AffineCurve, OpeningProof> {
     /// All the polynomial commitments required in the proof
     pub commitments: ProverCommitments<G>,

     /// batched commitment opening proof
-    pub proof: OpeningProof<G>,
+    #[serde(bound(
+        serialize = "OpeningProof: Serialize",
+        deserialize = "OpeningProof: Deserialize<'de>"
+    ))]
+    pub proof: OpeningProof,

     /// Two evaluations over a number of committed polynomials
     pub evals: ProofEvaluations<PointEvaluations<Vec<G::ScalarField>>>,
@@ -2074,10 +2134,10 @@ The prover then follows the following steps to create the proof:
Ensure we have room in the witness for the zero-knowledge rows. We currently expect the witness not to be of the same length as the domain, but instead be of the length of the (smaller) circuit. - If we cannot add `ZK_ROWS` rows to the columns of the witness before reaching + If we cannot add `zk_rows` rows to the columns of the witness before reaching the size of the domain, abort. 1. Pad the witness columns with Zero gates to make them the same length as the domain. - Then, randomize the last `ZK_ROWS` of each columns. + Then, randomize the last `zk_rows` rows of each column (see the sketch below). 1. Set up the Fq-Sponge. 1. Absorb the digest of the VerifierIndex. 1. Absorb the commitments of the previous challenges with the Fq-sponge. @@ -2141,7 +2201,6 @@ The prover then follows the following steps to create the proof: and by then dividing the resulting polynomial with the vanishing polynomial $Z_H$. TODO: specify the split of the permutation polynomial into perm and bnd? 1. commit (hiding) to the quotient polynomial $t$ - TODO: specify the dummies 1. Absorb the commitment of the quotient polynomial with the Fq-Sponge. 1. Sample $\zeta'$ with the Fq-Sponge. 1. Derive $\zeta$ from $\zeta'$ using the endomorphism (TODO: specify) @@ -2178,7 +2237,6 @@ The prover then follows the following steps to create the proof: 1. Squeeze the Fq-sponge and absorb the result with the Fr-Sponge. 1. Absorb the previous recursion challenges. 1. Compute evaluations for the previous recursion challenges. -1. Evaluate the negated public polynomial (if present) at $\zeta$ and $\zeta\omega$. 1. Absorb the unique evaluation of ft: $ft(\zeta\omega)$. 1. Absorb all the polynomial evaluations in $\zeta$ and $\zeta\omega$: * the public polynomial @@ -2202,12 +2260,14 @@ The prover then follows the following steps to create the proof: * the poseidon selector * the 15 registers/witness columns * the 6 sigmas + * the optional gates * optionally, the runtime table 1. if using lookup: * add the lookup sorted polynomials * add the lookup aggreg polynomial * add the combined table polynomial * if present, add the runtime table polynomial + * the lookup selectors 1. Create an aggregated evaluation proof for all of these polynomials at $\zeta$ and $\zeta\omega$ using $u$ and $v$. @@ -2240,7 +2300,7 @@ We run the following algorithm: 1. Absorb the commitment to the permutation trace with the Fq-Sponge. 1. Sample $\alpha'$ with the Fq-Sponge. 1. Derive $\alpha$ from $\alpha'$ using the endomorphism (TODO: details). -1. Enforce that the length of the $t$ commitment is of size `PERMUTS`. +1. Enforce that the length of the $t$ commitment is of size 7. 1. Absorb the commitment to the quotient polynomial $t$ into the argument. 1. Sample $\zeta'$ with the Fq-Sponge. 1. Derive $\zeta$ from $\zeta'$ using the endomorphism (TODO: specify). @@ -2298,6 +2358,7 @@ Essentially, this step verifies that $f(\zeta) = t(\zeta) * Z_H(\zeta)$.
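As referenced in the prover steps above, here is a self-contained sketch of the padding-and-randomizing step, using `u64` in place of field elements and a caller-supplied randomness closure (names are illustrative, not the crate's API):

```rust
// Sketch of "pad, then randomize the last `zk_rows` rows" for one witness
// column. `0` plays the role of the field's zero element.
fn pad_and_randomize(
    mut col: Vec<u64>,
    domain_size: usize,
    zk_rows: usize,
    mut rand_elem: impl FnMut() -> u64,
) -> Result<Vec<u64>, &'static str> {
    // Abort if the zero-knowledge rows do not fit before the end of the domain.
    if col.len() + zk_rows > domain_size {
        return Err("not enough room in the witness for the zero-knowledge rows");
    }
    // Pad with zeros up to the size of the domain.
    col.resize(domain_size, 0);
    // Randomize the last `zk_rows` rows.
    for cell in col.iter_mut().skip(domain_size - zk_rows) {
        *cell = rand_elem();
    }
    Ok(col)
}
```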
* witness commitments * coefficient commitments * sigma commitments + * optional gate commitments * lookup commitments #### Batch verification of proofs diff --git a/kimchi/Cargo.toml b/kimchi/Cargo.toml index 5f3404269b..d9415dce18 100644 --- a/kimchi/Cargo.toml +++ b/kimchi/Cargo.toml @@ -18,6 +18,7 @@ ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] } ark-ec = { version = "0.3.0", features = [ "parallel" ] } ark-poly = { version = "0.3.0", features = [ "parallel" ] } ark-serialize = "0.3.0" +ark-bn254 = { version = "0.3.0", optional = true } blake2 = "0.10.0" num-bigint = { version = "0.4.3", features = ["rand", "serde"]} num-derive = "0.3" @@ -81,5 +82,6 @@ harness = false default = [] internal_tracing = [ "internal-tracing/enabled" ] ocaml_types = [ "ocaml", "ocaml-gen", "poly-commitment/ocaml_types", "mina-poseidon/ocaml_types", "internal-tracing/ocaml_types" ] +bn254 = [ "ark-bn254" ] wasm_types = [ "wasm-bindgen" ] check_feature_flags = [] diff --git a/kimchi/src/bench.rs b/kimchi/src/bench.rs index 1af338d9ed..ccf4808c84 100644 --- a/kimchi/src/bench.rs +++ b/kimchi/src/bench.rs @@ -7,7 +7,7 @@ use mina_poseidon::{ sponge::{DefaultFqSponge, DefaultFrSponge}, }; use o1_utils::math; -use poly_commitment::commitment::CommitmentCurve; +use poly_commitment::{commitment::CommitmentCurve, evaluation_proof::OpeningProof}; use crate::{ circuits::{ @@ -28,8 +28,8 @@ type ScalarSponge = DefaultFrSponge; pub struct BenchmarkCtx { num_gates: usize, group_map: BWParameters, - index: ProverIndex, - verifier_index: VerifierIndex, + index: ProverIndex>, + verifier_index: VerifierIndex>, } impl BenchmarkCtx { @@ -77,7 +77,7 @@ impl BenchmarkCtx { } /// Produces a proof - pub fn create_proof(&self) -> (ProverProof, Vec) { + pub fn create_proof(&self) -> (ProverProof>, Vec) { // create witness let witness: [Vec; COLUMNS] = array::from_fn(|_| vec![1u32.into(); self.num_gates]); @@ -96,7 +96,8 @@ impl BenchmarkCtx { ) } - pub fn batch_verification(&self, batch: &[(ProverProof, Vec)]) { + #[allow(clippy::type_complexity)] + pub fn batch_verification(&self, batch: &[(ProverProof>, Vec)]) { // verify the proof let batch: Vec<_> = batch .iter() @@ -106,7 +107,11 @@ impl BenchmarkCtx { public_input: public, }) .collect(); - batch_verify::(&self.group_map, &batch).unwrap(); + batch_verify::>( + &self.group_map, + &batch, + ) + .unwrap(); } } diff --git a/kimchi/src/circuits/berkeley_columns.rs b/kimchi/src/circuits/berkeley_columns.rs new file mode 100644 index 0000000000..6558f4c367 --- /dev/null +++ b/kimchi/src/circuits/berkeley_columns.rs @@ -0,0 +1,159 @@ +use crate::{ + circuits::{ + expr::{self, ColumnEvaluations, Domain, ExprError, GenericColumn}, + gate::{CurrOrNext, GateType}, + lookup::lookups::LookupPattern, + }, + proof::{PointEvaluations, ProofEvaluations}, +}; +use serde::{Deserialize, Serialize}; +use CurrOrNext::{Curr, Next}; + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)] +/// A type representing one of the polynomials involved in the PLONK IOP. 
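The `bench.rs` changes above illustrate the PR's central refactor: `ProverIndex`, `VerifierIndex`, and `ProverProof` now take the opening-proof type as a parameter instead of hard-coding the IPA opening proof. A toy model of the shape of that generalization (all names illustrative, not the crate's real types):

```rust
// Toy model of making a proof generic over its opening proof. `Ipa` stands in
// for a concrete scheme such as the evaluation-proof opening proof.
trait OpenProof {
    type Srs: Default;
}

struct Ipa;
impl OpenProof for Ipa {
    type Srs = ();
}

struct ProverProof<O: OpenProof> {
    opening: O, // the batched commitment opening proof
}

// Downstream code picks the concrete scheme once, as the benches do above.
type DefaultProof = ProverProof<Ipa>;
```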
+pub enum Column { + Witness(usize), + Z, + LookupSorted(usize), + LookupAggreg, + LookupTable, + LookupKindIndex(LookupPattern), + LookupRuntimeSelector, + LookupRuntimeTable, + Index(GateType), + Coefficient(usize), + Permutation(usize), +} + +impl GenericColumn for Column { + fn domain(&self) -> Domain { + match self { + Column::Index(GateType::Generic) => Domain::D4, + Column::Index(GateType::CompleteAdd) => Domain::D4, + _ => Domain::D8, + } + } +} + +impl Column { + pub fn latex(&self) -> String { + match self { + Column::Witness(i) => format!("w_{{{i}}}"), + Column::Z => "Z".to_string(), + Column::LookupSorted(i) => format!("s_{{{i}}}"), + Column::LookupAggreg => "a".to_string(), + Column::LookupTable => "t".to_string(), + Column::LookupKindIndex(i) => format!("k_{{{i:?}}}"), + Column::LookupRuntimeSelector => "rts".to_string(), + Column::LookupRuntimeTable => "rt".to_string(), + Column::Index(gate) => { + format!("{gate:?}") + } + Column::Coefficient(i) => format!("c_{{{i}}}"), + Column::Permutation(i) => format!("sigma_{{{i}}}"), + } + } + + pub fn text(&self) -> String { + match self { + Column::Witness(i) => format!("w[{i}]"), + Column::Z => "Z".to_string(), + Column::LookupSorted(i) => format!("s[{i}]"), + Column::LookupAggreg => "a".to_string(), + Column::LookupTable => "t".to_string(), + Column::LookupKindIndex(i) => format!("k[{i:?}]"), + Column::LookupRuntimeSelector => "rts".to_string(), + Column::LookupRuntimeTable => "rt".to_string(), + Column::Index(gate) => { + format!("{gate:?}") + } + Column::Coefficient(i) => format!("c[{i}]"), + Column::Permutation(i) => format!("sigma_[{i}]"), + } + } +} + +impl expr::Variable { + pub fn ocaml(&self) -> String { + format!("var({:?}, {:?})", self.col, self.row) + } + + pub fn latex(&self) -> String { + let col = self.col.latex(); + match self.row { + Curr => col, + Next => format!("\\tilde{{{col}}}"), + } + } + + pub fn text(&self) -> String { + let col = self.col.text(); + match self.row { + Curr => format!("Curr({col})"), + Next => format!("Next({col})"), + } + } +} + +impl ColumnEvaluations for ProofEvaluations> { + type Column = Column; + fn evaluate(&self, col: Self::Column) -> Result, ExprError> { + use Column::*; + match col { + Witness(i) => Ok(self.w[i]), + Z => Ok(self.z), + LookupSorted(i) => self.lookup_sorted[i].ok_or(ExprError::MissingIndexEvaluation(col)), + LookupAggreg => self + .lookup_aggregation + .ok_or(ExprError::MissingIndexEvaluation(col)), + LookupTable => self + .lookup_table + .ok_or(ExprError::MissingIndexEvaluation(col)), + LookupRuntimeTable => self + .runtime_lookup_table + .ok_or(ExprError::MissingIndexEvaluation(col)), + Index(GateType::Poseidon) => Ok(self.poseidon_selector), + Index(GateType::Generic) => Ok(self.generic_selector), + Index(GateType::CompleteAdd) => Ok(self.complete_add_selector), + Index(GateType::VarBaseMul) => Ok(self.mul_selector), + Index(GateType::EndoMul) => Ok(self.emul_selector), + Index(GateType::EndoMulScalar) => Ok(self.endomul_scalar_selector), + Index(GateType::RangeCheck0) => self + .range_check0_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + Index(GateType::RangeCheck1) => self + .range_check1_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + Index(GateType::ForeignFieldAdd) => self + .foreign_field_add_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + Index(GateType::ForeignFieldMul) => self + .foreign_field_mul_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + Index(GateType::Xor16) => self + .xor_selector + 
.ok_or(ExprError::MissingIndexEvaluation(col)), + Index(GateType::Rot64) => self + .rot_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + Permutation(i) => Ok(self.s[i]), + Coefficient(i) => Ok(self.coefficients[i]), + LookupKindIndex(LookupPattern::Xor) => self + .xor_lookup_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + LookupKindIndex(LookupPattern::Lookup) => self + .lookup_gate_lookup_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + LookupKindIndex(LookupPattern::RangeCheck) => self + .range_check_lookup_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + LookupKindIndex(LookupPattern::ForeignFieldMul) => self + .foreign_field_mul_lookup_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + LookupRuntimeSelector => self + .runtime_lookup_table_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + Index(_) => Err(ExprError::MissingIndexEvaluation(col)), + } + } +} diff --git a/kimchi/src/circuits/constraints.rs b/kimchi/src/circuits/constraints.rs index 238e725bc1..115a696965 100644 --- a/kimchi/src/circuits/constraints.rs +++ b/kimchi/src/circuits/constraints.rs @@ -7,11 +7,11 @@ use crate::{ gate::{CircuitGate, GateType}, lookup::{index::LookupConstraintSystem, lookups::LookupFeatures, tables::LookupTable}, polynomial::{WitnessEvals, WitnessOverDomains, WitnessShifts}, - polynomials::permutation::{Shifts, ZK_ROWS}, + polynomials::permutation::Shifts, wires::*, }, curve::KimchiCurve, - error::SetupError, + error::{DomainCreationError, SetupError}, prover_index::ProverIndex, }; use ark_ff::{PrimeField, SquareRootField, Zero}; @@ -21,6 +21,7 @@ use ark_poly::{ }; use o1_utils::ExtendedEvaluations; use once_cell::sync::OnceCell; +use poly_commitment::OpenProof; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_with::serde_as; use std::array; @@ -148,6 +149,8 @@ pub struct ConstraintSystem { #[serde(bound = "CircuitGate: Serialize + DeserializeOwned")] pub gates: Vec>, + pub zk_rows: u64, + /// flags for optional features pub feature_flags: FeatureFlags, @@ -191,6 +194,7 @@ pub struct Builder { runtime_tables: Option>>, precomputations: Option>>, disable_gates_checks: bool, + max_poly_size: Option, } /// Create selector polynomial for a circuit gate @@ -248,12 +252,14 @@ impl ConstraintSystem { runtime_tables: None, precomputations: None, disable_gates_checks: false, + max_poly_size: None, } } pub fn precomputations(&self) -> &Arc> { - self.precomputations - .get_or_init(|| Arc::new(DomainConstantEvaluations::create(self.domain).unwrap())) + self.precomputations.get_or_init(|| { + Arc::new(DomainConstantEvaluations::create(self.domain, self.zk_rows).unwrap()) + }) } pub fn set_precomputations(&self, precomputations: Arc>) { @@ -263,7 +269,12 @@ impl ConstraintSystem { } } -impl> ProverIndex { +impl< + F: PrimeField + SquareRootField, + G: KimchiCurve, + OpeningProof: OpenProof, + > ProverIndex +{ /// This function verifies the consistency of the wire /// assignments (witness) against the constraints /// witness: wire assignment witness @@ -307,7 +318,7 @@ impl> ProverInd } // check the gate's satisfiability - gate.verify::(row, &witness, self, public) + gate.verify(row, &witness, self, public) .map_err(|err| GateError::Custom { row, err })?; } @@ -360,8 +371,9 @@ impl ConstraintSystem { // compute permutation polynomials let shifts = Shifts::new(&self.domain.d1); - let mut sigmal1: [Vec; PERMUTS] = - array::from_fn(|_| vec![F::zero(); self.domain.d1.size()]); + let n = self.domain.d1.size(); + + let mut 
sigmal1: [Vec; PERMUTS] = array::from_fn(|_| vec![F::zero(); n]); for (row, gate) in self.gates.iter().enumerate() { for (cell, sigma) in gate.wires.iter().zip(sigmal1.iter_mut()) { @@ -369,6 +381,14 @@ impl ConstraintSystem { } } + // Zero out the sigmas in the zk rows, to ensure that the permutation aggregation is + // quasi-random for those rows. + for row in n + 2 - (self.zk_rows as usize)..n - 1 { + for sigma in sigmal1.iter_mut() { + sigma[row] = F::zero(); + } + } + let sigmal1: [_; PERMUTS] = { let [s0, s1, s2, s3, s4, s5, s6] = sigmal1; [ @@ -590,6 +610,10 @@ impl ConstraintSystem { } } +pub fn zk_rows_strict_lower_bound(num_chunks: usize) -> usize { + (2 * (PERMUTS + 1) * num_chunks - 2) / PERMUTS +} + impl Builder { /// Set up the number of public inputs. /// If not invoked, it equals `0` by default. @@ -643,6 +667,11 @@ impl Builder { self } + pub fn max_poly_size(mut self, max_poly_size: Option) -> Self { + self.max_poly_size = max_poly_size; + self + } + /// Build the [ConstraintSystem] from a [Builder]. pub fn build(self) -> Result, SetupError> { let mut gates = self.gates; @@ -655,8 +684,9 @@ impl Builder { let lookup_features = LookupFeatures::from_gates(&gates, runtime_tables.is_some()); - let num_lookups = { - let mut num_lookups: usize = lookup_tables + let lookup_domain_size = { + // First we sum over the lookup table size + let mut lookup_domain_size: usize = lookup_tables .iter() .map( |LookupTable { data, id: _ }| { @@ -668,28 +698,84 @@ impl Builder { }, ) .sum(); - for runtime_table in runtime_tables.iter() { - num_lookups += runtime_table.len(); + // After that on the runtime tables + if let Some(runtime_tables) = runtime_tables.as_ref() { + for runtime_table in runtime_tables.iter() { + lookup_domain_size += runtime_table.len(); + } } + // And we add the built-in tables, depending on the features. let LookupFeatures { patterns, .. } = &lookup_features; for pattern in patterns.into_iter() { if let Some(gate_table) = pattern.table() { - num_lookups += gate_table.table_size(); + lookup_domain_size += gate_table.table_size(); } } - num_lookups + lookup_domain_size }; - //~ 2. Create a domain for the circuit. That is, + //~ 1. Compute the number of zero-knowledge rows (`zk_rows`) that will be required to + //~ achieve zero-knowledge. The following constraints apply to `zk_rows`: + //~ * The number of chunks `c` results in an evaluation at `zeta` and `zeta * omega` in + //~ each column for `2*c` evaluations per column, so `zk_rows >= 2*c + 1`. + //~ * The permutation argument interacts with the `c` chunks in parallel, so it is + //~ possible to cross-correlate between them to compromise zero knowledge. We know + //~ that there is some `c >= 1` such that `zk_rows = 2*c + k` from the above. Thus, + //~ attempting to find the evaluation at a new point, we find that: + //~ * the evaluation of every witness column in the permutation contains `k` unknowns; + //~ * the evaluations of the permutation argument aggregation has `k-1` unknowns; + //~ * the permutation argument applies on all but `zk_rows - 3` rows; + //~ * and thus we form the equation `zk_rows - 3 < 7 * k + (k - 1)` to ensure that we + //~ can construct fewer equations than we have unknowns. + //~ + //~ This simplifies to `k > (2 * c - 2) / 7`, giving `zk_rows > (16 * c - 2) / 7`. 
+ //~ We can derive `c` from the `max_poly_size` supported by the URS, and thus we find + //~ `zk_rows` and `domain_size` satisfying the fixpoint + //~ + //~ ```text + //~ zk_rows = (16 * (domain_size / max_poly_size) + 5) / 7 + //~ domain_size = circuit_size + zk_rows + //~ ``` + //~ + let (zk_rows, domain_size_lower_bound) = { + let circuit_lower_bound = std::cmp::max(gates.len(), lookup_domain_size + 1); + let get_domain_size_lower_bound = |zk_rows: u64| circuit_lower_bound + zk_rows as usize; + + let mut zk_rows = 3; + let mut domain_size_lower_bound = get_domain_size_lower_bound(zk_rows); + if let Some(max_poly_size) = self.max_poly_size { + // Iterate to find a fixed-point where zk_rows is sufficient for the number of + // chunks that we use, and also does not cause us to overflow the domain size. + // NB: We use iteration here rather than hard-coding an assumption about + // `compute_size_of_domain`s internals. In practice, this will never be executed + // more than once. + while { + let domain_size = D::::compute_size_of_domain(domain_size_lower_bound) + .ok_or(SetupError::DomainCreation( + DomainCreationError::DomainSizeFailed(domain_size_lower_bound), + ))?; + let num_chunks = if domain_size < max_poly_size { + 1 + } else { + domain_size / max_poly_size + }; + zk_rows = (zk_rows_strict_lower_bound(num_chunks) + 1) as u64; + domain_size_lower_bound = get_domain_size_lower_bound(zk_rows); + domain_size < domain_size_lower_bound + } {} + } + (zk_rows, domain_size_lower_bound) + }; + + //~ 1. Create a domain for the circuit. That is, //~ compute the smallest subgroup of the field that - //~ has order greater or equal to `n + ZK_ROWS` elements. - let domain_size_lower_bound = - std::cmp::max(gates.len(), num_lookups + 1) + ZK_ROWS as usize; - let domain = EvaluationDomains::::create(domain_size_lower_bound)?; + //~ has order greater or equal to `n + zk_rows` elements. + let domain = EvaluationDomains::::create(domain_size_lower_bound) + .map_err(SetupError::DomainCreation)?; - assert!(domain.d1.size > ZK_ROWS); + assert!(domain.d1.size > zk_rows); - //~ 3. Pad the circuit: add zero gates to reach the domain size. + //~ 1. Pad the circuit: add zero gates to reach the domain size. let d1_size = domain.d1.size(); let mut padding = (gates.len()..d1_size) .map(|i| { @@ -723,15 +809,20 @@ impl Builder { } } - //~ 4. sample the `PERMUTS` shifts. + //~ 1. sample the `PERMUTS` shifts. 
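A runnable sketch of the fixpoint computation specified above, with `compute_size_of_domain` replaced by `next_power_of_two` (a reasonable stand-in for a radix-2 domain below the two-adicity limit; names are illustrative):

```rust
const PERMUTS: usize = 7;

// `zk_rows` must strictly exceed this bound, as derived in the comments above.
fn zk_rows_strict_lower_bound(num_chunks: usize) -> usize {
    (2 * (PERMUTS + 1) * num_chunks - 2) / PERMUTS
}

// Iterate until the domain size implied by `zk_rows` no longer grows the
// number of chunks (and hence `zk_rows` itself): the same loop as above.
fn zk_rows_and_domain_lower_bound(
    circuit_lower_bound: usize,
    max_poly_size: usize,
) -> (u64, usize) {
    let mut zk_rows: u64 = 3;
    let mut domain_size_lower_bound = circuit_lower_bound + zk_rows as usize;
    loop {
        let domain_size = domain_size_lower_bound.next_power_of_two();
        let num_chunks = std::cmp::max(1, domain_size / max_poly_size);
        zk_rows = (zk_rows_strict_lower_bound(num_chunks) + 1) as u64;
        domain_size_lower_bound = circuit_lower_bound + zk_rows as usize;
        if domain_size >= domain_size_lower_bound {
            return (zk_rows, domain_size_lower_bound);
        }
    }
}

// Sanity check: for a single chunk, zk_rows_strict_lower_bound(1) = (16 - 2) / 7 = 2,
// so zk_rows = 3, matching the default used before the loop.
```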
let shifts = Shifts::new(&domain.d1); // // Lookup // ------ - let lookup_constraint_system = - LookupConstraintSystem::create(&gates, lookup_tables, runtime_tables, &domain) - .map_err(|e| SetupError::ConstraintSystem(e.to_string()))?; + let lookup_constraint_system = LookupConstraintSystem::create( + &gates, + lookup_tables, + runtime_tables, + &domain, + zk_rows as usize, + ) + .map_err(|e| SetupError::ConstraintSystem(e.to_string()))?; let sid = shifts.map[0].clone(); @@ -748,6 +839,7 @@ impl Builder { gates, shift: shifts.shifts, endo, + zk_rows, //fr_sponge_params: self.sponge_params, lookup_constraint_system, feature_flags, @@ -789,4 +881,32 @@ pub mod tests { Self::for_testing(gates) } } + + #[test] + pub fn test_domains_computation_with_runtime_tables() { + let dummy_gate = CircuitGate { + typ: GateType::Generic, + wires: [Wire::new(0, 0); PERMUTS], + coeffs: vec![Fp::zero()], + }; + // inputs + expected output + let data = [((10, 10), 128), ((0, 0), 8), ((5, 100), 512)]; + for ((number_of_rt_cfgs, size), expected_domain_size) in data.into_iter() { + let builder = ConstraintSystem::create(vec![dummy_gate.clone(), dummy_gate.clone()]); + let table_ids: Vec = (0..number_of_rt_cfgs).collect(); + let rt_cfgs: Vec> = table_ids + .into_iter() + .map(|table_id| { + let indexes: Vec = (0..size).collect(); + let first_column: Vec = indexes.into_iter().map(Fp::from).collect(); + RuntimeTableCfg { + id: table_id, + first_column, + } + }) + .collect(); + let res = builder.runtime(Some(rt_cfgs)).build().unwrap(); + assert_eq!(res.domain.d1.size, expected_domain_size) + } + } } diff --git a/kimchi/src/circuits/domain_constant_evaluation.rs b/kimchi/src/circuits/domain_constant_evaluation.rs index 15cbb5ff92..6659f42d31 100644 --- a/kimchi/src/circuits/domain_constant_evaluation.rs +++ b/kimchi/src/circuits/domain_constant_evaluation.rs @@ -1,8 +1,6 @@ //! This contains the [DomainConstantEvaluations] which is used to provide precomputations to a [ConstraintSystem](super::constraints::ConstraintSystem). 
use crate::circuits::domains::EvaluationDomains; -use crate::circuits::polynomials::permutation::zk_polynomial; -use crate::circuits::polynomials::permutation::ZK_ROWS; use ark_ff::FftField; use ark_poly::EvaluationDomain; use ark_poly::UVPolynomial; @@ -10,7 +8,7 @@ use ark_poly::{univariate::DensePolynomial as DP, Evaluations as E, Radix2Evalua use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use super::polynomials::permutation::vanishes_on_last_4_rows; +use super::polynomials::permutation::{permutation_vanishing_polynomial, vanishes_on_last_n_rows}; #[serde_as] #[derive(Clone, Serialize, Deserialize, Debug)] @@ -26,18 +24,18 @@ pub struct DomainConstantEvaluations { /// 0-th Lagrange evaluated over domain.d8 #[serde_as(as = "o1_utils::serialization::SerdeAs")] pub constant_1_d8: E>, - /// the polynomial that vanishes on the last four rows + /// the polynomial that vanishes on the zero-knowledge rows and the row before #[serde_as(as = "o1_utils::serialization::SerdeAs")] - pub vanishes_on_last_4_rows: E>, + pub vanishes_on_zero_knowledge_and_previous_rows: E>, /// zero-knowledge polynomial over domain.d8 #[serde_as(as = "o1_utils::serialization::SerdeAs")] - pub zkpl: E>, + pub permutation_vanishing_polynomial_l: E>, #[serde_as(as = "o1_utils::serialization::SerdeAs")] - pub zkpm: DP, + pub permutation_vanishing_polynomial_m: DP, } impl DomainConstantEvaluations { - pub fn create(domain: EvaluationDomains) -> Option { + pub fn create(domain: EvaluationDomains, zk_rows: u64) -> Option { let poly_x_d1 = DP::from_coefficients_slice(&[F::zero(), F::one()]) .evaluate_over_domain_by_ref(domain.d8); let constant_1_d4 = @@ -45,22 +43,24 @@ impl DomainConstantEvaluations { let constant_1_d8 = E::>::from_vec_and_domain(vec![F::one(); domain.d8.size()], domain.d8); - let vanishes_on_last_4_rows = - vanishes_on_last_4_rows(domain.d1).evaluate_over_domain(domain.d8); + let vanishes_on_zero_knowledge_and_previous_rows = + vanishes_on_last_n_rows(domain.d1, zk_rows + 1).evaluate_over_domain(domain.d8); - assert!(domain.d1.size > ZK_ROWS); + assert!(domain.d1.size > zk_rows); // x^3 - x^2(w1+w2+w3) + x(w1w2+w1w3+w2w3) - w1w2w3 - let zkpm = zk_polynomial(domain.d1); - let zkpl = zkpm.evaluate_over_domain_by_ref(domain.d8); + let permutation_vanishing_polynomial_m = + permutation_vanishing_polynomial(domain.d1, zk_rows); + let permutation_vanishing_polynomial_l = + permutation_vanishing_polynomial_m.evaluate_over_domain_by_ref(domain.d8); Some(DomainConstantEvaluations { poly_x_d1, constant_1_d4, constant_1_d8, - vanishes_on_last_4_rows, - zkpl, - zkpm, + vanishes_on_zero_knowledge_and_previous_rows, + permutation_vanishing_polynomial_l, + permutation_vanishing_polynomial_m, }) } } diff --git a/kimchi/src/circuits/domains.rs b/kimchi/src/circuits/domains.rs index 7f32dd6e12..89251bea5f 100644 --- a/kimchi/src/circuits/domains.rs +++ b/kimchi/src/circuits/domains.rs @@ -3,7 +3,7 @@ use ark_poly::{EvaluationDomain, Radix2EvaluationDomain as Domain}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use crate::error::SetupError; +use crate::error::DomainCreationError; #[serde_as] #[derive(Debug, Clone, Copy, Serialize, Deserialize)] @@ -22,26 +22,29 @@ impl EvaluationDomains { /// Creates 4 evaluation domains `d1` (of size `n`), `d2` (of size `2n`), `d4` (of size `4n`), /// and `d8` (of size `8n`). If generator of `d8` is `g`, the generator /// of `d4` is `g^2`, the generator of `d2` is `g^4`, and the generator of `d1` is `g^8`. 
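The `vanishes_on_last_n_rows` helper used above generalizes the old `vanishes_on_last_4_rows`: with `i = zk_rows + 1` it yields the `vanishes_on_zero_knowledge_and_previous_rows` polynomial. Over a toy prime field, its evaluation at a point is just a product of linear factors over the last `i` rows (a sketch, not the crate's field arithmetic):

```rust
// Toy-field sketch (u64 arithmetic mod P) of evaluating the polynomial that
// vanishes on the last `i` rows of a domain of size `n` with generator
// `omega`: prod_{k = n-i}^{n-1} (pt - omega^k).
const P: u64 = 2013265921; // a toy prime; real code uses the curve's scalar field

fn mod_pow(mut base: u64, mut exp: u64) -> u64 {
    let mut acc = 1u64;
    base %= P;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % P;
        }
        base = base * base % P;
        exp >>= 1;
    }
    acc
}

fn eval_vanishes_on_last_n_rows(omega: u64, n: u64, i: u64, pt: u64) -> u64 {
    let mut term = mod_pow(omega, n - i); // omega^{n-i}: first of the last i rows
    let mut acc = 1u64;
    for _ in 0..i {
        acc = acc * ((pt + P - term) % P) % P;
        term = term * omega % P;
    }
    acc
}
```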
- pub fn create(n: usize) -> Result { - let n = Domain::::compute_size_of_domain(n).ok_or(SetupError::DomainCreation( - "could not compute size of domain", - ))?; + pub fn create(n: usize) -> Result { + let n = Domain::::compute_size_of_domain(n) + .ok_or(DomainCreationError::DomainSizeFailed(n))?; - let d1 = Domain::::new(n).ok_or(SetupError::DomainCreation( - "construction of domain d1 did not work as intended", + let d1 = Domain::::new(n).ok_or(DomainCreationError::DomainConstructionFailed( + "d1".to_string(), + n, ))?; // we also create domains of larger sizes // to efficiently operate on polynomials in evaluation form. // (in evaluation form, the domain needs to grow as the degree of a polynomial grows) - let d2 = Domain::::new(2 * n).ok_or(SetupError::DomainCreation( - "construction of domain d2 did not work as intended", + let d2 = Domain::::new(2 * n).ok_or(DomainCreationError::DomainConstructionFailed( + "d2".to_string(), + 2 * n, ))?; - let d4 = Domain::::new(4 * n).ok_or(SetupError::DomainCreation( - "construction of domain d4 did not work as intended", + let d4 = Domain::::new(4 * n).ok_or(DomainCreationError::DomainConstructionFailed( + "d4".to_string(), + 4 * n, ))?; - let d8 = Domain::::new(8 * n).ok_or(SetupError::DomainCreation( - "construction of domain d8 did not work as intended", + let d8 = Domain::::new(8 * n).ok_or(DomainCreationError::DomainConstructionFailed( + "d8".to_string(), + 8 * n, ))?; // ensure the relationship between the three domains in case the library's behavior changes diff --git a/kimchi/src/circuits/expr.rs b/kimchi/src/circuits/expr.rs index 74608d91d5..cd0e5ec2ef 100644 --- a/kimchi/src/circuits/expr.rs +++ b/kimchi/src/circuits/expr.rs @@ -1,5 +1,6 @@ use crate::{ circuits::{ + berkeley_columns, constraints::FeatureFlags, domains::EvaluationDomains, gate::{CurrOrNext, GateType}, @@ -7,10 +8,10 @@ use crate::{ index::LookupSelectors, lookups::{LookupPattern, LookupPatterns}, }, - polynomials::permutation::eval_vanishes_on_last_4_rows, + polynomials::permutation::eval_vanishes_on_last_n_rows, wires::COLUMNS, }, - proof::{PointEvaluations, ProofEvaluations}, + proof::PointEvaluations, }; use ark_ff::{FftField, Field, One, PrimeField, Zero}; use ark_poly::{ @@ -21,18 +22,22 @@ use o1_utils::{foreign_field::ForeignFieldHelpers, FieldHelpers}; use rayon::prelude::*; use serde::{Deserialize, Serialize}; use std::ops::{Add, AddAssign, Mul, Neg, Sub}; +use std::{ + cmp::Ordering, + fmt::{self, Debug}, + iter::FromIterator, +}; use std::{ collections::{HashMap, HashSet}, ops::MulAssign, }; -use std::{fmt, iter::FromIterator}; use thiserror::Error; use CurrOrNext::{Curr, Next}; use self::constraints::ExprOps; #[derive(Debug, Error)] -pub enum ExprError { +pub enum ExprError { #[error("Empty stack")] EmptyStack, @@ -46,7 +51,7 @@ pub enum ExprError { MissingIndexEvaluation(Column), #[error("Linearization failed (too many unevaluated columns: {0:?}")] - FailedLinearization(Vec), + FailedLinearization(Vec>), #[error("runtime table not available")] MissingRuntime, @@ -67,6 +72,8 @@ pub struct Constants { pub endo_coefficient: F, /// The MDS matrix pub mds: &'static Vec>, + /// The number of zero-knowledge rows + pub zk_rows: u64, } /// The polynomials specific to the lookup argument. @@ -96,8 +103,8 @@ pub struct Environment<'a, F: FftField> { pub witness: &'a [Evaluations>; COLUMNS], /// The coefficient column polynomials pub coefficient: &'a [Evaluations>; COLUMNS], - /// The polynomial which vanishes on the last 4 elements of the domain. 
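The string-based `SetupError::DomainCreation` above becomes a structured `DomainCreationError` that carries the offending size. A hypothetical sketch of that error type's shape using `thiserror` (the crate this codebase already uses for `ExprError`); only the variant names and payloads are taken from the diff, the messages are illustrative:

```rust
use thiserror::Error;

// Illustrative reconstruction of the new error type's shape.
#[derive(Debug, Error)]
pub enum DomainCreationError {
    #[error("could not compute the size of domain for {0}")]
    DomainSizeFailed(usize),
    #[error("construction of domain {0} for size {1} failed")]
    DomainConstructionFailed(String, usize),
}
```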
- pub vanishes_on_last_4_rows: &'a Evaluations>, + /// The polynomial that vanishes on the zero-knowledge rows and the row before. + pub vanishes_on_zero_knowledge_and_previous_rows: &'a Evaluations>, /// The permutation aggregation polynomial. pub z: &'a Evaluations>, /// The index selector polynomials. @@ -113,9 +120,20 @@ pub struct Environment<'a, F: FftField> { pub lookup: Option>, } -impl<'a, F: FftField> Environment<'a, F> { - fn get_column(&self, col: &Column) -> Option<&'a Evaluations>> { - use Column::*; +pub trait ColumnEnvironment<'a, F: FftField> { + type Column; + fn get_column(&self, col: &Self::Column) -> Option<&'a Evaluations>>; + fn get_domain(&self, d: Domain) -> D; + fn get_constants(&self) -> &Constants; + fn vanishes_on_zero_knowledge_and_previous_rows(&self) -> &'a Evaluations>; + fn l0_1(&self) -> F; +} + +impl<'a, F: FftField> ColumnEnvironment<'a, F> for Environment<'a, F> { + type Column = berkeley_columns::Column; + + fn get_column(&self, col: &Self::Column) -> Option<&'a Evaluations>> { + use berkeley_columns::Column::*; let lookup = self.lookup.as_ref(); match col { Witness(i) => Some(&self.witness[*i]), @@ -134,6 +152,27 @@ impl<'a, F: FftField> Environment<'a, F> { Permutation(_) => None, } } + + fn get_domain(&self, d: Domain) -> D { + match d { + Domain::D1 => self.domain.d1, + Domain::D2 => self.domain.d2, + Domain::D4 => self.domain.d4, + Domain::D8 => self.domain.d8, + } + } + + fn get_constants(&self) -> &Constants { + &self.constants + } + + fn vanishes_on_zero_knowledge_and_previous_rows(&self) -> &'a Evaluations> { + self.vanishes_on_zero_knowledge_and_previous_rows + } + + fn l0_1(&self) -> F { + self.l0_1 + } } // In this file, we define... @@ -164,100 +203,20 @@ fn unnormalized_lagrange_basis(domain: &D, i: i32, pt: &F) -> F domain.evaluate_vanishing_polynomial(*pt) / (*pt - omega_i) } -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)] -/// A type representing one of the polynomials involved in the PLONK IOP. 
-pub enum Column { - Witness(usize), - Z, - LookupSorted(usize), - LookupAggreg, - LookupTable, - LookupKindIndex(LookupPattern), - LookupRuntimeSelector, - LookupRuntimeTable, - Index(GateType), - Coefficient(usize), - Permutation(usize), -} - -impl Column { - fn domain(&self) -> Domain { - match self { - Column::Index(GateType::Generic) => Domain::D4, - Column::Index(GateType::CompleteAdd) => Domain::D4, - _ => Domain::D8, - } - } - - fn latex(&self) -> String { - match self { - Column::Witness(i) => format!("w_{{{i}}}"), - Column::Z => "Z".to_string(), - Column::LookupSorted(i) => format!("s_{{{i}}}"), - Column::LookupAggreg => "a".to_string(), - Column::LookupTable => "t".to_string(), - Column::LookupKindIndex(i) => format!("k_{{{i:?}}}"), - Column::LookupRuntimeSelector => "rts".to_string(), - Column::LookupRuntimeTable => "rt".to_string(), - Column::Index(gate) => { - format!("{gate:?}") - } - Column::Coefficient(i) => format!("c_{{{i}}}"), - Column::Permutation(i) => format!("sigma_{{{i}}}"), - } - } - - fn text(&self) -> String { - match self { - Column::Witness(i) => format!("w[{i}]"), - Column::Z => "Z".to_string(), - Column::LookupSorted(i) => format!("s[{i}]"), - Column::LookupAggreg => "a".to_string(), - Column::LookupTable => "t".to_string(), - Column::LookupKindIndex(i) => format!("k[{i:?}]"), - Column::LookupRuntimeSelector => "rts".to_string(), - Column::LookupRuntimeTable => "rt".to_string(), - Column::Index(gate) => { - format!("{gate:?}") - } - Column::Coefficient(i) => format!("c[{i}]"), - Column::Permutation(i) => format!("sigma_[{i}]"), - } - } +pub trait GenericColumn { + fn domain(&self) -> Domain; } #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)] /// A type representing a variable which can appear in a constraint. 
It specifies a column /// and a relative position (Curr or Next) -pub struct Variable { +pub struct Variable { /// The column of this variable pub col: Column, /// The row (Curr of Next) of this variable pub row: CurrOrNext, } -impl Variable { - fn ocaml(&self) -> String { - format!("var({:?}, {:?})", self.col, self.row) - } - - fn latex(&self) -> String { - let col = self.col.latex(); - match self.row { - Curr => col, - Next => format!("\\tilde{{{col}}}"), - } - } - - fn text(&self) -> String { - let col = self.col.text(); - match self.row { - Curr => format!("Curr({col})"), - Next => format!("Next({col})"), - } - } -} - #[derive(Clone, Debug, PartialEq)] /// An arithmetic expression over /// @@ -284,7 +243,7 @@ pub enum ConstantExpr { } impl ConstantExpr { - fn to_polish_(&self, res: &mut Vec>) { + fn to_polish_(&self, res: &mut Vec>) { match self { ConstantExpr::Alpha => res.push(PolishToken::Alpha), ConstantExpr::Beta => res.push(PolishToken::Beta), @@ -419,7 +378,7 @@ pub enum Op2 { } impl Op2 { - fn to_polish(&self) -> PolishToken { + fn to_polish(&self) -> PolishToken { use Op2::*; match self { Add => PolishToken::Add, @@ -457,40 +416,49 @@ impl FeatureFlag { } } +#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct RowOffset { + pub zk_rows: bool, + pub offset: i32, +} + /// An multi-variate polynomial over the base ring `C` with /// variables /// /// - `Cell(v)` for `v : Variable` -/// - VanishesOnLast4Rows +/// - VanishesOnZeroKnowledgeAndPreviousRows /// - UnnormalizedLagrangeBasis(i) for `i : i32` /// /// This represents a PLONK "custom constraint", which enforces that /// the corresponding combination of the polynomials corresponding to /// the above variables should vanish on the PLONK domain. #[derive(Clone, Debug, PartialEq)] -pub enum Expr { +pub enum Expr { Constant(C), - Cell(Variable), - Double(Box>), - Square(Box>), - BinOp(Op2, Box>, Box>), - VanishesOnLast4Rows, + Cell(Variable), + Double(Box>), + Square(Box>), + BinOp(Op2, Box>, Box>), + VanishesOnZeroKnowledgeAndPreviousRows, /// UnnormalizedLagrangeBasis(i) is /// (x^n - 1) / (x - omega^i) - UnnormalizedLagrangeBasis(i32), - Pow(Box>, u64), - Cache(CacheId, Box>), + UnnormalizedLagrangeBasis(RowOffset), + Pow(Box>, u64), + Cache(CacheId, Box>), /// If the feature flag is enabled, return the first expression; otherwise, return the second. 
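The new `RowOffset` above lets `UnnormalizedLagrangeBasis` be anchored either at the start of the domain or at the first zero-knowledge row, so expressions stay correct when `zk_rows` varies. A small sketch of how an offset resolves to a concrete row index, mirroring the arithmetic repeated later in this diff:

```rust
#[derive(Clone, Copy)]
struct RowOffset {
    zk_rows: bool, // if true, count from the first zero-knowledge row
    offset: i32,
}

// Resolve to a (possibly negative) row index; negative values wrap around to
// the end of the domain, as in `unnormalized_lagrange_basis`.
fn resolve(i: RowOffset, zk_rows: u64) -> i32 {
    if i.zk_rows {
        -(zk_rows as i32) + i.offset
    } else {
        i.offset
    }
}
```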
- IfFeature(FeatureFlag, Box>, Box>), + IfFeature(FeatureFlag, Box>, Box>), } -impl + PartialEq + Clone> Expr { - fn apply_feature_flags_inner(&self, features: &FeatureFlags) -> (Expr, bool) { +impl + PartialEq + Clone, Column: Clone + PartialEq> + Expr +{ + fn apply_feature_flags_inner(&self, features: &FeatureFlags) -> (Expr, bool) { use Expr::*; match self { - Constant(_) | Cell(_) | VanishesOnLast4Rows | UnnormalizedLagrangeBasis(_) => { - (self.clone(), false) - } + Constant(_) + | Cell(_) + | VanishesOnZeroKnowledgeAndPreviousRows + | UnnormalizedLagrangeBasis(_) => (self.clone(), false), Double(c) => { let (c_reduced, reduce_further) = c.apply_feature_flags_inner(features); if reduce_further && c_reduced.is_zero() { @@ -618,7 +586,7 @@ impl + PartialEq + Clone> Expr { } } } - pub fn apply_feature_flags(&self, features: &FeatureFlags) -> Expr { + pub fn apply_feature_flags(&self, features: &FeatureFlags) -> Expr { let (res, _) = self.apply_feature_flags_inner(features); res } @@ -628,7 +596,7 @@ impl + PartialEq + Clone> Expr { /// [reverse Polish notation](https://en.wikipedia.org/wiki/Reverse_Polish_notation) /// expressions, which are vectors of the below tokens. #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub enum PolishToken { +pub enum PolishToken { Alpha, Beta, Gamma, @@ -639,14 +607,14 @@ pub enum PolishToken { col: usize, }, Literal(F), - Cell(Variable), + Cell(Variable), Dup, Pow(u64), Add, Mul, Sub, - VanishesOnLast4Rows, - UnnormalizedLagrangeBasis(i32), + VanishesOnZeroKnowledgeAndPreviousRows, + UnnormalizedLagrangeBasis(RowOffset), Store, Load(usize), /// Skip the given number of tokens if the feature is enabled. @@ -655,33 +623,17 @@ pub enum PolishToken { SkipIfNot(FeatureFlag, usize), } -impl Variable { - fn evaluate( +pub trait ColumnEvaluations { + type Column; + fn evaluate(&self, col: Self::Column) -> Result, ExprError>; +} + +impl Variable { + fn evaluate>( &self, - evals: &ProofEvaluations>, - ) -> Result { - let point_evaluations = { - use Column::*; - let l = evals - .lookup - .as_ref() - .ok_or(ExprError::LookupShouldNotBeUsed); - match self.col { - Witness(i) => Ok(evals.w[i]), - Z => Ok(evals.z), - LookupSorted(i) => l.map(|l| l.sorted[i]), - LookupAggreg => l.map(|l| l.aggreg), - LookupTable => l.map(|l| l.table), - LookupRuntimeTable => l.and_then(|l| l.runtime.ok_or(ExprError::MissingRuntime)), - Index(GateType::Poseidon) => Ok(evals.poseidon_selector), - Index(GateType::Generic) => Ok(evals.generic_selector), - Permutation(i) => Ok(evals.s[i]), - Coefficient(i) => Ok(evals.coefficients[i]), - LookupKindIndex(_) | LookupRuntimeSelector | Index(_) => { - Err(ExprError::MissingIndexEvaluation(self.col)) - } - } - }?; + evals: &Evaluations, + ) -> Result> { + let point_evaluations = evals.evaluate(self.col)?; match self.row { CurrOrNext::Curr => Ok(point_evaluations.zeta), CurrOrNext::Next => Ok(point_evaluations.zeta_omega), @@ -689,15 +641,15 @@ impl Variable { } } -impl PolishToken { +impl PolishToken { /// Evaluate an RPN expression to a field element. 
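As context for `PolishToken::evaluate` below: it is the classic reverse-Polish stack evaluation, shown here over plain `u64` values with a trimmed-down token set (illustrative only):

```rust
// Tiny stack machine in the spirit of `PolishToken::evaluate`: literals push,
// operators pop two values and push the result.
enum Tok {
    Literal(u64),
    Add,
    Mul,
}

fn eval_rpn(toks: &[Tok]) -> Option<u64> {
    let mut stack: Vec<u64> = Vec::new();
    for tok in toks {
        match tok {
            Tok::Literal(x) => stack.push(*x),
            Tok::Add | Tok::Mul => {
                let y = stack.pop()?;
                let x = stack.pop()?;
                stack.push(match tok {
                    Tok::Add => x.wrapping_add(y),
                    Tok::Mul => x.wrapping_mul(y),
                    Tok::Literal(_) => unreachable!(),
                });
            }
        }
    }
    stack.pop()
}

// Example: [2, 3, Add, 4, Mul] evaluates to (2 + 3) * 4 = 20.
```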
- pub fn evaluate( - toks: &[PolishToken], + pub fn evaluate>( + toks: &[PolishToken], d: D, pt: F, - evals: &ProofEvaluations>, + evals: &Evaluations, c: &Constants, - ) -> Result { + ) -> Result> { let mut stack = vec![]; let mut cache: Vec = vec![]; @@ -718,9 +670,16 @@ impl PolishToken { } EndoCoefficient => stack.push(c.endo_coefficient), Mds { row, col } => stack.push(c.mds[*row][*col]), - VanishesOnLast4Rows => stack.push(eval_vanishes_on_last_4_rows(d, pt)), + VanishesOnZeroKnowledgeAndPreviousRows => { + stack.push(eval_vanishes_on_last_n_rows(d, c.zk_rows + 1, pt)) + } UnnormalizedLagrangeBasis(i) => { - stack.push(unnormalized_lagrange_basis(&d, *i, &pt)) + let offset = if i.zk_rows { + -(c.zk_rows as i32) + i.offset + } else { + i.offset + }; + stack.push(unnormalized_lagrange_basis(&d, offset, &pt)) } Literal(x) => stack.push(*x), Dup => stack.push(stack[stack.len() - 1]), @@ -772,9 +731,9 @@ impl PolishToken { } } -impl Expr { +impl Expr { /// Convenience function for constructing cell variables. - pub fn cell(col: Column, row: CurrOrNext) -> Expr { + pub fn cell(col: Column, row: CurrOrNext) -> Expr { Expr::Cell(Variable { col, row }) } @@ -787,31 +746,33 @@ impl Expr { } /// Convenience function for constructing constant expressions. - pub fn constant(c: C) -> Expr { + pub fn constant(c: C) -> Expr { Expr::Constant(c) } - fn degree(&self, d1_size: u64) -> u64 { + fn degree(&self, d1_size: u64, zk_rows: u64) -> u64 { use Expr::*; match self { - Double(x) => x.degree(d1_size), + Double(x) => x.degree(d1_size, zk_rows), Constant(_) => 0, - VanishesOnLast4Rows => 4, + VanishesOnZeroKnowledgeAndPreviousRows => zk_rows + 1, UnnormalizedLagrangeBasis(_) => d1_size, Cell(_) => d1_size, - Square(x) => 2 * x.degree(d1_size), - BinOp(Op2::Mul, x, y) => (*x).degree(d1_size) + (*y).degree(d1_size), + Square(x) => 2 * x.degree(d1_size, zk_rows), + BinOp(Op2::Mul, x, y) => (*x).degree(d1_size, zk_rows) + (*y).degree(d1_size, zk_rows), BinOp(Op2::Add, x, y) | BinOp(Op2::Sub, x, y) => { - std::cmp::max((*x).degree(d1_size), (*y).degree(d1_size)) + std::cmp::max((*x).degree(d1_size, zk_rows), (*y).degree(d1_size, zk_rows)) + } + Pow(e, d) => d * e.degree(d1_size, zk_rows), + Cache(_, e) => e.degree(d1_size, zk_rows), + IfFeature(_, e1, e2) => { + std::cmp::max(e1.degree(d1_size, zk_rows), e2.degree(d1_size, zk_rows)) } - Pow(e, d) => d * e.degree(d1_size), - Cache(_, e) => e.degree(d1_size), - IfFeature(_, e1, e2) => std::cmp::max(e1.degree(d1_size), e2.degree(d1_size)), } } } -impl fmt::Display for Expr> +impl fmt::Display for Expr, berkeley_columns::Column> where F: PrimeField, { @@ -821,7 +782,7 @@ where } #[derive(Clone, Copy, Debug, PartialEq, FromPrimitive, ToPrimitive)] -enum Domain { +pub enum Domain { D1 = 1, D2 = 2, D4 = 4, @@ -894,11 +855,11 @@ pub fn pows(x: F, n: usize) -> Vec { /// = (omega^{q n} omega_8^{r n} - 1) / (omega_8^k - omega^i) /// = ((omega_8^n)^r - 1) / (omega_8^k - omega^i) /// = ((omega_8^n)^r - 1) / (omega^q omega_8^r - omega^i) -fn unnormalized_lagrange_evals( +fn unnormalized_lagrange_evals<'a, F: FftField, Environment: ColumnEnvironment<'a, F>>( l0_1: F, i: i32, res_domain: Domain, - env: &Environment, + env: &Environment, ) -> Evaluations> { let k = match res_domain { Domain::D1 => 1, @@ -906,9 +867,9 @@ fn unnormalized_lagrange_evals( Domain::D4 => 4, Domain::D8 => 8, }; - let res_domain = get_domain(res_domain, env); + let res_domain = env.get_domain(res_domain); - let d1 = env.domain.d1; + let d1 = env.get_domain(Domain::D1); let n = d1.size; // 
Renormalize negative values to wrap around at domain size let i = if i < 0 { @@ -1358,16 +1319,7 @@ impl<'a, F: FftField> EvalResult<'a, F> { } } -fn get_domain(d: Domain, env: &Environment) -> D { - match d { - Domain::D1 => env.domain.d1, - Domain::D2 => env.domain.d2, - Domain::D4 => env.domain.d4, - Domain::D8 => env.domain.d8, - } -} - -impl Expr> { +impl Expr, Column> { /// Convenience function for constructing expressions from literal /// field elements. pub fn literal(x: F) -> Self { @@ -1377,7 +1329,7 @@ impl Expr> { /// Combines multiple constraints `[c0, ..., cn]` into a single constraint /// `alpha^alpha0 * c0 + alpha^{alpha0 + 1} * c1 + ... + alpha^{alpha0 + n} * cn`. pub fn combine_constraints(alphas: impl Iterator, cs: Vec) -> Self { - let zero = Expr::>::zero(); + let zero = Expr::, Column>::zero(); cs.into_iter() .zip_eq(alphas) .map(|(c, i)| Expr::Constant(ConstantExpr::Alpha.pow(i as u64)) * c) @@ -1385,16 +1337,20 @@ impl Expr> { } } -impl Expr> { +impl Expr, Column> { /// Compile an expression to an RPN expression. - pub fn to_polish(&self) -> Vec> { + pub fn to_polish(&self) -> Vec> { let mut res = vec![]; let mut cache = HashMap::new(); self.to_polish_(&mut cache, &mut res); res } - fn to_polish_(&self, cache: &mut HashMap, res: &mut Vec>) { + fn to_polish_( + &self, + cache: &mut HashMap, + res: &mut Vec>, + ) { match self { Expr::Double(x) => { x.to_polish_(cache, res); @@ -1414,8 +1370,8 @@ impl Expr> { c.to_polish_(res); } Expr::Cell(v) => res.push(PolishToken::Cell(*v)), - Expr::VanishesOnLast4Rows => { - res.push(PolishToken::VanishesOnLast4Rows); + Expr::VanishesOnZeroKnowledgeAndPreviousRows => { + res.push(PolishToken::VanishesOnZeroKnowledgeAndPreviousRows); } Expr::UnnormalizedLagrangeBasis(i) => { res.push(PolishToken::UnnormalizedLagrangeBasis(*i)); @@ -1474,8 +1430,10 @@ impl Expr> { pub fn beta() -> Self { Expr::Constant(ConstantExpr::Beta) } +} - fn evaluate_constants_(&self, c: &Constants) -> Expr { +impl Expr, Column> { + fn evaluate_constants_(&self, c: &Constants) -> Expr { use Expr::*; // TODO: Use cache match self { @@ -1484,7 +1442,7 @@ impl Expr> { Square(x) => x.evaluate_constants_(c).square(), Constant(x) => Constant(x.value(c)), Cell(v) => Cell(*v), - VanishesOnLast4Rows => VanishesOnLast4Rows, + VanishesOnZeroKnowledgeAndPreviousRows => VanishesOnZeroKnowledgeAndPreviousRows, UnnormalizedLagrangeBasis(i) => UnnormalizedLagrangeBasis(*i), BinOp(Op2::Add, x, y) => x.evaluate_constants_(c) + y.evaluate_constants_(c), BinOp(Op2::Mul, x, y) => x.evaluate_constants_(c) * y.evaluate_constants_(c), @@ -1499,24 +1457,28 @@ impl Expr> { } /// Evaluate an expression as a field element against an environment. - pub fn evaluate( + pub fn evaluate< + 'a, + Evaluations: ColumnEvaluations, + Environment: ColumnEnvironment<'a, F, Column = Column>, + >( &self, d: D, pt: F, - evals: &ProofEvaluations>, - env: &Environment, - ) -> Result { - self.evaluate_(d, pt, evals, &env.constants) + evals: &Evaluations, + env: &Environment, + ) -> Result> { + self.evaluate_(d, pt, evals, env.get_constants()) } /// Evaluate an expression as a field element against the constants. 
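The `degree` change above is the crux of the variable-`zk_rows` work: the vanishing term now contributes `zk_rows + 1` to the quotient's degree bound instead of a fixed 4. A stripped-down model of that bookkeeping (a sketch, not the crate's `Expr`):

```rust
// Minimal model of the degree computation with the new `zk_rows` parameter.
enum Expr {
    Constant,
    Cell,
    VanishesOnZeroKnowledgeAndPreviousRows,
    Mul(Box<Expr>, Box<Expr>),
    Add(Box<Expr>, Box<Expr>),
}

fn degree(e: &Expr, d1_size: u64, zk_rows: u64) -> u64 {
    match e {
        Expr::Constant => 0,
        Expr::Cell => d1_size,
        // Was a fixed 4 when ZK_ROWS was a constant; now tracks zk_rows.
        Expr::VanishesOnZeroKnowledgeAndPreviousRows => zk_rows + 1,
        Expr::Mul(x, y) => degree(x, d1_size, zk_rows) + degree(y, d1_size, zk_rows),
        Expr::Add(x, y) => degree(x, d1_size, zk_rows).max(degree(y, d1_size, zk_rows)),
    }
}
```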
- pub fn evaluate_( + pub fn evaluate_>( &self, d: D, pt: F, - evals: &ProofEvaluations>, + evals: &Evaluations, c: &Constants, - ) -> Result { + ) -> Result> { use Expr::*; match self { Double(x) => x.evaluate_(d, pt, evals, c).map(|x| x.double()), @@ -1538,8 +1500,17 @@ impl Expr> { let y = (*y).evaluate_(d, pt, evals, c)?; Ok(x - y) } - VanishesOnLast4Rows => Ok(eval_vanishes_on_last_4_rows(d, pt)), - UnnormalizedLagrangeBasis(i) => Ok(unnormalized_lagrange_basis(&d, *i, &pt)), + VanishesOnZeroKnowledgeAndPreviousRows => { + Ok(eval_vanishes_on_last_n_rows(d, c.zk_rows + 1, pt)) + } + UnnormalizedLagrangeBasis(i) => { + let offset = if i.zk_rows { + -(c.zk_rows as i32) + i.offset + } else { + i.offset + }; + Ok(unnormalized_lagrange_basis(&d, offset, &pt)) + } Cell(v) => v.evaluate(evals), Cache(_, e) => e.evaluate_(d, pt, evals, c), IfFeature(feature, e1, e2) => { @@ -1553,12 +1524,18 @@ impl Expr> { } /// Evaluate the constant expressions in this expression down into field elements. - pub fn evaluate_constants(&self, env: &Environment) -> Expr { - self.evaluate_constants_(&env.constants) + pub fn evaluate_constants<'a, Environment: ColumnEnvironment<'a, F, Column = Column>>( + &self, + env: &Environment, + ) -> Expr { + self.evaluate_constants_(env.get_constants()) } /// Compute the polynomial corresponding to this expression, in evaluation form. - pub fn evaluations(&self, env: &Environment<'_, F>) -> Evaluations> { + pub fn evaluations<'a, Environment: ColumnEnvironment<'a, F, Column = Column>>( + &self, + env: &Environment, + ) -> Evaluations> { self.evaluate_constants(env).evaluations(env) } } @@ -1568,53 +1545,66 @@ enum Either { Right(B), } -impl Expr { +impl Expr { /// Evaluate an expression into a field element. - pub fn evaluate( + pub fn evaluate>( &self, d: D, pt: F, - evals: &ProofEvaluations>, - ) -> Result { + zk_rows: u64, + evals: &Evaluations, + ) -> Result> { use Expr::*; match self { Constant(x) => Ok(*x), - Pow(x, p) => Ok(x.evaluate(d, pt, evals)?.pow([*p])), - Double(x) => x.evaluate(d, pt, evals).map(|x| x.double()), - Square(x) => x.evaluate(d, pt, evals).map(|x| x.square()), + Pow(x, p) => Ok(x.evaluate(d, pt, zk_rows, evals)?.pow([*p])), + Double(x) => x.evaluate(d, pt, zk_rows, evals).map(|x| x.double()), + Square(x) => x.evaluate(d, pt, zk_rows, evals).map(|x| x.square()), BinOp(Op2::Mul, x, y) => { - let x = (*x).evaluate(d, pt, evals)?; - let y = (*y).evaluate(d, pt, evals)?; + let x = (*x).evaluate(d, pt, zk_rows, evals)?; + let y = (*y).evaluate(d, pt, zk_rows, evals)?; Ok(x * y) } BinOp(Op2::Add, x, y) => { - let x = (*x).evaluate(d, pt, evals)?; - let y = (*y).evaluate(d, pt, evals)?; + let x = (*x).evaluate(d, pt, zk_rows, evals)?; + let y = (*y).evaluate(d, pt, zk_rows, evals)?; Ok(x + y) } BinOp(Op2::Sub, x, y) => { - let x = (*x).evaluate(d, pt, evals)?; - let y = (*y).evaluate(d, pt, evals)?; + let x = (*x).evaluate(d, pt, zk_rows, evals)?; + let y = (*y).evaluate(d, pt, zk_rows, evals)?; Ok(x - y) } - VanishesOnLast4Rows => Ok(eval_vanishes_on_last_4_rows(d, pt)), - UnnormalizedLagrangeBasis(i) => Ok(unnormalized_lagrange_basis(&d, *i, &pt)), + VanishesOnZeroKnowledgeAndPreviousRows => { + Ok(eval_vanishes_on_last_n_rows(d, zk_rows + 1, pt)) + } + UnnormalizedLagrangeBasis(i) => { + let offset = if i.zk_rows { + -(zk_rows as i32) + i.offset + } else { + i.offset + }; + Ok(unnormalized_lagrange_basis(&d, offset, &pt)) + } Cell(v) => v.evaluate(evals), - Cache(_, e) => e.evaluate(d, pt, evals), + Cache(_, e) => e.evaluate(d, pt, zk_rows, evals), 
IfFeature(feature, e1, e2) => { if feature.is_enabled() { - e1.evaluate(d, pt, evals) + e1.evaluate(d, pt, zk_rows, evals) } else { - e2.evaluate(d, pt, evals) + e2.evaluate(d, pt, zk_rows, evals) } } } } /// Compute the polynomial corresponding to this expression, in evaluation form. - pub fn evaluations(&self, env: &Environment<'_, F>) -> Evaluations> { - let d1_size = env.domain.d1.size; - let deg = self.degree(d1_size); + pub fn evaluations<'a, Environment: ColumnEnvironment<'a, F, Column = Column>>( + &self, + env: &Environment, + ) -> Evaluations> { + let d1_size = env.get_domain(Domain::D1).size; + let deg = self.degree(d1_size, env.get_constants().zk_rows); let d = if deg <= d1_size { Domain::D1 } else if deg <= 4 * d1_size { @@ -1637,13 +1627,13 @@ impl Expr { assert_eq!(domain, d); evals } - EvalResult::Constant(x) => EvalResult::init_((d, get_domain(d, env)), |_| x), + EvalResult::Constant(x) => EvalResult::init_((d, env.get_domain(d)), |_| x), EvalResult::SubEvals { evals, domain: d_sub, shift: s, } => { - let res_domain = get_domain(d, env); + let res_domain = env.get_domain(d); let scale = (d_sub as usize) / (d as usize); assert!(scale != 0); EvalResult::init_((d, res_domain), |i| { @@ -1653,16 +1643,16 @@ impl Expr { } } - fn evaluations_helper<'a, 'b>( + fn evaluations_helper<'a, 'b, Environment: ColumnEnvironment<'a, F, Column = Column>>( &self, cache: &'b mut HashMap>, d: Domain, - env: &Environment<'a, F>, + env: &Environment, ) -> Either, CacheId> where 'a: 'b, { - let dom = (d, get_domain(d, env)); + let dom = (d, env.get_domain(d)); let res: EvalResult<'a, F> = match self { Expr::Square(x) => match x.evaluations_helper(cache, d, env) { @@ -1724,22 +1714,29 @@ impl Expr { Expr::Pow(x, p) => { let x = x.evaluations_helper(cache, d, env); match x { - Either::Left(x) => x.pow(*p, (d, get_domain(d, env))), + Either::Left(x) => x.pow(*p, (d, env.get_domain(d))), Either::Right(id) => { - id.get_from(cache).unwrap().pow(*p, (d, get_domain(d, env))) + id.get_from(cache).unwrap().pow(*p, (d, env.get_domain(d))) } } } - Expr::VanishesOnLast4Rows => EvalResult::SubEvals { + Expr::VanishesOnZeroKnowledgeAndPreviousRows => EvalResult::SubEvals { domain: Domain::D8, shift: 0, - evals: env.vanishes_on_last_4_rows, + evals: env.vanishes_on_zero_knowledge_and_previous_rows(), }, Expr::Constant(x) => EvalResult::Constant(*x), - Expr::UnnormalizedLagrangeBasis(i) => EvalResult::Evals { - domain: d, - evals: unnormalized_lagrange_evals(env.l0_1, *i, d, env), - }, + Expr::UnnormalizedLagrangeBasis(i) => { + let offset = if i.zk_rows { + -(env.get_constants().zk_rows as i32) + i.offset + } else { + i.offset + }; + EvalResult::Evals { + domain: d, + evals: unnormalized_lagrange_evals(env.l0_1(), offset, d, env), + } + } Expr::Cell(Variable { col, row }) => { let evals: &'a Evaluations> = { match env.get_column(col) { @@ -1754,7 +1751,7 @@ impl Expr { } } Expr::BinOp(op, e1, e2) => { - let dom = (d, get_domain(d, env)); + let dom = (d, env.get_domain(d)); let f = |x: EvalResult, y: EvalResult| match op { Op2::Mul => x.mul(y, dom), Op2::Add => x.add(y, dom), @@ -1790,12 +1787,12 @@ impl Expr { #[derive(Clone, Debug, Serialize, Deserialize)] /// A "linearization", which is linear combination with `E` coefficients of /// columns. 
-pub struct Linearization { +pub struct Linearization { pub constant_term: E, pub index_terms: Vec<(Column, E)>, } -impl Default for Linearization { +impl Default for Linearization { fn default() -> Self { Linearization { constant_term: E::default(), @@ -1804,9 +1801,9 @@ impl Default for Linearization { } } -impl Linearization { +impl Linearization { /// Apply a function to all the coefficients in the linearization. - pub fn map B>(&self, f: F) -> Linearization { + pub fn map B>(&self, f: F) -> Linearization { Linearization { constant_term: f(&self.constant_term), index_terms: self.index_terms.iter().map(|(c, x)| (*c, f(x))).collect(), @@ -1814,28 +1811,38 @@ impl Linearization { } } -impl Linearization>> { +impl + Linearization, Column>, Column> +{ /// Evaluate the constants in a linearization with `ConstantExpr` coefficients down /// to literal field elements. - pub fn evaluate_constants(&self, env: &Environment) -> Linearization> { + pub fn evaluate_constants<'a, Environment: ColumnEnvironment<'a, F, Column = Column>>( + &self, + env: &Environment, + ) -> Linearization, Column> { self.map(|e| e.evaluate_constants(env)) } } -impl Linearization>> { +impl Linearization>, Column> { /// Given a linearization and an environment, compute the polynomial corresponding to the /// linearization, in evaluation form. - pub fn to_polynomial( + pub fn to_polynomial< + 'a, + ColEvaluations: ColumnEvaluations, + Environment: ColumnEnvironment<'a, F, Column = Column>, + >( &self, - env: &Environment, + env: &Environment, pt: F, - evals: &ProofEvaluations>, + evals: &ColEvaluations, ) -> (F, Evaluations>) { - let cs = &env.constants; - let n = env.domain.d1.size(); + let cs = env.get_constants(); + let d1 = env.get_domain(Domain::D1); + let n = d1.size(); let mut res = vec![F::zero(); n]; self.index_terms.iter().for_each(|(idx, c)| { - let c = PolishToken::evaluate(c, env.domain.d1, pt, evals, cs).unwrap(); + let c = PolishToken::evaluate(c, d1, pt, evals, cs).unwrap(); let e = env .get_column(idx) .unwrap_or_else(|| panic!("Index polynomial {idx:?} not found")); @@ -1844,28 +1851,35 @@ impl Linearization>> { .enumerate() .for_each(|(i, r)| *r += c * e.evals[scale * i]); }); - let p = Evaluations::>::from_vec_and_domain(res, env.domain.d1); + let p = Evaluations::>::from_vec_and_domain(res, d1); ( - PolishToken::evaluate(&self.constant_term, env.domain.d1, pt, evals, cs).unwrap(), + PolishToken::evaluate(&self.constant_term, d1, pt, evals, cs).unwrap(), p, ) } } -impl Linearization>> { +impl + Linearization, Column>, Column> +{ /// Given a linearization and an environment, compute the polynomial corresponding to the /// linearization, in evaluation form. 
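To see what `to_polynomial` below computes pointwise: the value of a linearization is its constant term plus the sum of each coefficient times its column's evaluation. A toy version over `u64` values (illustrative types, not the crate's):

```rust
// Toy linearization: column ids index into a slice of column evaluations.
struct Linearization {
    constant_term: u64,
    index_terms: Vec<(usize, u64)>, // (column id, coefficient)
}

fn eval_linearization(lin: &Linearization, columns: &[u64]) -> u64 {
    lin.index_terms
        .iter()
        .fold(lin.constant_term, |acc, (col, coeff)| {
            acc.wrapping_add(coeff.wrapping_mul(columns[*col]))
        })
}
```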
- pub fn to_polynomial( + pub fn to_polynomial< + 'a, + ColEvaluations: ColumnEvaluations, + Environment: ColumnEnvironment<'a, F, Column = Column>, + >( &self, - env: &Environment, + env: &Environment, pt: F, - evals: &ProofEvaluations>, + evals: &ColEvaluations, ) -> (F, DensePolynomial) { - let cs = &env.constants; - let n = env.domain.d1.size(); + let cs = env.get_constants(); + let d1 = env.get_domain(Domain::D1); + let n = d1.size(); let mut res = vec![F::zero(); n]; self.index_terms.iter().for_each(|(idx, c)| { - let c = c.evaluate_(env.domain.d1, pt, evals, cs).unwrap(); + let c = c.evaluate_(d1, pt, evals, cs).unwrap(); let e = env .get_column(idx) .unwrap_or_else(|| panic!("Index polynomial {idx:?} not found")); @@ -1874,17 +1888,12 @@ impl Linearization>> { .enumerate() .for_each(|(i, r)| *r += c * e.evals[scale * i]) }); - let p = Evaluations::>::from_vec_and_domain(res, env.domain.d1).interpolate(); - ( - self.constant_term - .evaluate_(env.domain.d1, pt, evals, cs) - .unwrap(), - p, - ) + let p = Evaluations::>::from_vec_and_domain(res, d1).interpolate(); + (self.constant_term.evaluate_(d1, pt, evals, cs).unwrap(), p) } } -impl Expr { +impl Expr { /// Exponentiate an expression #[must_use] pub fn pow(self, p: u64) -> Self { @@ -1896,27 +1905,32 @@ impl Expr { } } -type Monomials = HashMap, Expr>; +type Monomials = HashMap>, Expr>; -fn mul_monomials + Clone + One + Zero + PartialEq>( - e1: &Monomials, - e2: &Monomials, -) -> Monomials { - let mut res: HashMap<_, Expr> = HashMap::new(); +fn mul_monomials< + F: Neg + Clone + One + Zero + PartialEq, + Column: Ord + Copy + std::hash::Hash, +>( + e1: &Monomials, + e2: &Monomials, +) -> Monomials { + let mut res: HashMap<_, Expr> = HashMap::new(); for (m1, c1) in e1.iter() { for (m2, c2) in e2.iter() { let mut m = m1.clone(); m.extend(m2); m.sort(); let c1c2 = c1.clone() * c2.clone(); - let v = res.entry(m).or_insert_with(Expr::::zero); + let v = res.entry(m).or_insert_with(Expr::::zero); *v = v.clone() + c1c2; } } res } -impl + Clone + One + Zero + PartialEq> Expr { +impl + Clone + One + Zero + PartialEq, Column: Ord + Copy + std::hash::Hash> + Expr +{ // TODO: This function (which takes linear time) // is called repeatedly in monomials, yielding quadratic behavior for // that function. 
It's ok for now as we only call that function once on @@ -1930,20 +1944,20 @@ impl + Clone + One + Zero + PartialEq> Expr { Cell(v) => evaluated.contains(&v.col), Double(x) => x.is_constant(evaluated), BinOp(_, x, y) => x.is_constant(evaluated) && y.is_constant(evaluated), - VanishesOnLast4Rows => true, + VanishesOnZeroKnowledgeAndPreviousRows => true, UnnormalizedLagrangeBasis(_) => true, Cache(_, x) => x.is_constant(evaluated), IfFeature(_, e1, e2) => e1.is_constant(evaluated) && e2.is_constant(evaluated), } } - fn monomials(&self, ev: &HashSet) -> HashMap, Expr> { - let sing = |v: Vec, c: Expr| { + fn monomials(&self, ev: &HashSet) -> HashMap>, Expr> { + let sing = |v: Vec>, c: Expr| { let mut h = HashMap::new(); h.insert(v, c); h }; - let constant = |e: Expr| sing(vec![], e); + let constant = |e: Expr| sing(vec![], e); use Expr::*; if self.is_constant(ev) { @@ -1953,7 +1967,7 @@ impl + Clone + One + Zero + PartialEq> Expr { match self { Pow(x, d) => { // Run the multiplication logic with square and multiply - let mut acc = sing(vec![], Expr::::one()); + let mut acc = sing(vec![], Expr::::one()); let mut acc_is_one = true; let x = x.monomials(ev); @@ -1976,7 +1990,9 @@ impl + Clone + One + Zero + PartialEq> Expr { } Cache(_, e) => e.monomials(ev), UnnormalizedLagrangeBasis(i) => constant(UnnormalizedLagrangeBasis(*i)), - VanishesOnLast4Rows => constant(VanishesOnLast4Rows), + VanishesOnZeroKnowledgeAndPreviousRows => { + constant(VanishesOnZeroKnowledgeAndPreviousRows) + } Constant(c) => constant(Constant(c.clone())), Cell(var) => sing(vec![*var], Constant(F::one())), BinOp(Op2::Add, e1, e2) => { @@ -2060,9 +2076,9 @@ impl + Clone + One + Zero + PartialEq> Expr { pub fn linearize( &self, evaluated: HashSet, - ) -> Result>, ExprError> { - let mut res: HashMap> = HashMap::new(); - let mut constant_term: Expr = Self::zero(); + ) -> Result, Column>, ExprError> { + let mut res: HashMap> = HashMap::new(); + let mut constant_term: Expr = Self::zero(); let monomials = self.monomials(&evaluated); for (m, c) in monomials { @@ -2194,7 +2210,7 @@ impl Mul> for ConstantExpr { } } -impl Zero for Expr { +impl Zero for Expr { fn zero() -> Self { Expr::Constant(F::zero()) } @@ -2207,7 +2223,7 @@ impl Zero for Expr { } } -impl One for Expr { +impl One for Expr { fn one() -> Self { Expr::Constant(F::one()) } @@ -2220,10 +2236,10 @@ impl One for Expr { } } -impl> Neg for Expr { - type Output = Expr; +impl, Column> Neg for Expr { + type Output = Expr; - fn neg(self) -> Expr { + fn neg(self) -> Expr { match self { Expr::Constant(x) => Expr::Constant(x.neg()), e => Expr::BinOp( @@ -2235,8 +2251,8 @@ impl> Neg for Expr { } } -impl Add> for Expr { - type Output = Expr; +impl Add> for Expr { + type Output = Expr; fn add(self, other: Self) -> Self { if self.is_zero() { return other; @@ -2248,7 +2264,7 @@ impl Add> for Expr { } } -impl AddAssign> for Expr { +impl AddAssign> for Expr { fn add_assign(&mut self, other: Self) { if self.is_zero() { *self = other; @@ -2258,8 +2274,8 @@ impl AddAssign> for Expr { } } -impl Mul> for Expr { - type Output = Expr; +impl Mul> for Expr { + type Output = Expr; fn mul(self, other: Self) -> Self { if self.is_zero() || other.is_zero() { return Self::zero(); @@ -2275,9 +2291,10 @@ impl Mul> for Expr { } } -impl MulAssign> for Expr +impl MulAssign> for Expr where F: Zero + One + PartialEq + Clone, + Column: PartialEq + Clone, { fn mul_assign(&mut self, other: Self) { if self.is_zero() || other.is_zero() { @@ -2290,8 +2307,8 @@ where } } -impl Sub> for Expr { - type Output = Expr; 
+impl Sub> for Expr { + type Output = Expr; fn sub(self, other: Self) -> Self { if other.is_zero() { return self; } @@ -2300,13 +2317,13 @@ impl Sub> for Expr { } } -impl From for Expr { +impl From for Expr { fn from(x: u64) -> Self { Expr::Constant(F::from(x)) } } -impl From for Expr> { +impl From for Expr, Column> { fn from(x: u64) -> Self { Expr::Constant(ConstantExpr::Literal(F::from(x))) } } @@ -2318,8 +2335,8 @@ impl From for ConstantExpr { } } -impl Mul for Expr> { - type Output = Expr>; +impl Mul for Expr, Column> { + type Output = Expr, Column>; fn mul(self, y: F) -> Self::Output { Expr::Constant(ConstantExpr::Literal(y)) * self } } @@ -2395,7 +2412,7 @@ where } } -impl Expr> +impl Expr, berkeley_columns::Column> where F: PrimeField, { @@ -2422,14 +2439,21 @@ where /// Recursively print the expression, /// except for the cached expressions that are stored in the `cache`. - fn ocaml(&self, cache: &mut HashMap>>) -> String { + fn ocaml( + &self, + cache: &mut HashMap, berkeley_columns::Column>>, + ) -> String { use Expr::*; match self { Double(x) => format!("double({})", x.ocaml(cache)), Constant(x) => x.ocaml(), Cell(v) => format!("cell({})", v.ocaml()), - UnnormalizedLagrangeBasis(i) => format!("unnormalized_lagrange_basis({})", *i), - VanishesOnLast4Rows => "vanishes_on_last_4_rows".to_string(), + UnnormalizedLagrangeBasis(i) => { + format!("unnormalized_lagrange_basis({}, {})", i.zk_rows, i.offset) + } + VanishesOnZeroKnowledgeAndPreviousRows => { + "vanishes_on_zero_knowledge_and_previous_rows".to_string() + } BinOp(Op2::Add, x, y) => format!("({} + {})", x.ocaml(cache), y.ocaml(cache)), BinOp(Op2::Mul, x, y) => format!("({} * {})", x.ocaml(cache), y.ocaml(cache)), BinOp(Op2::Sub, x, y) => format!("({} - {})", x.ocaml(cache), y.ocaml(cache)), @@ -2471,14 +2495,30 @@ where res } - fn latex(&self, cache: &mut HashMap>>) -> String { + fn latex( + &self, + cache: &mut HashMap, berkeley_columns::Column>>, + ) -> String { use Expr::*; match self { Double(x) => format!("2 ({})", x.latex(cache)), Constant(x) => x.latex(), Cell(v) => v.latex(), - UnnormalizedLagrangeBasis(i) => format!("unnormalized\\_lagrange\\_basis({})", *i), - VanishesOnLast4Rows => "vanishes\\_on\\_last\\_4\\_rows".to_string(), + UnnormalizedLagrangeBasis(RowOffset { + zk_rows: true, + offset: i, + }) => { + format!("unnormalized\\_lagrange\\_basis(zk\\_rows + {})", *i) + } + UnnormalizedLagrangeBasis(RowOffset { + zk_rows: false, + offset: i, + }) => { + format!("unnormalized\\_lagrange\\_basis({})", *i) + } + VanishesOnZeroKnowledgeAndPreviousRows => { + "vanishes\\_on\\_zero\\_knowledge\\_and\\_previous\\_rows".to_string() + } BinOp(Op2::Add, x, y) => format!("({} + {})", x.latex(cache), y.latex(cache)), BinOp(Op2::Mul, x, y) => format!("({} \\cdot {})", x.latex(cache), y.latex(cache)), BinOp(Op2::Sub, x, y) => format!("({} - {})", x.latex(cache), y.latex(cache)), @@ -2494,14 +2534,32 @@ where /// Recursively print the expression, /// except for the cached expressions that are stored in the `cache`.
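The three printers share the `RowOffset` handling introduced by this patch. Below is a hedged, dependency-free sketch of that rendering logic for the `text` form that follows; the struct literal, its `offset: i32` field width, and the standalone `render` helper are illustrative assumptions that mirror the match arms in this hunk.

use std::cmp::Ordering;

#[derive(Clone, Copy)]
struct RowOffset {
    zk_rows: bool,
    offset: i32,
}

// Mirrors the text() arms: a zk-relative basis index prints as
// zk_rows +/- offset, while an absolute index prints the raw offset.
fn render(i: RowOffset) -> String {
    match (i.zk_rows, i.offset.cmp(&0)) {
        (true, Ordering::Greater) => format!("unnormalized_lagrange_basis(zk_rows + {})", i.offset),
        (true, Ordering::Equal) => "unnormalized_lagrange_basis(zk_rows)".to_string(),
        (true, Ordering::Less) => format!("unnormalized_lagrange_basis(zk_rows - {})", -i.offset),
        (false, _) => format!("unnormalized_lagrange_basis({})", i.offset),
    }
}

fn main() {
    // The lookup argument's final row, as constructed later in this diff.
    let final_lookup_row = RowOffset { zk_rows: true, offset: -1 };
    assert_eq!(
        render(final_lookup_row),
        "unnormalized_lagrange_basis(zk_rows - 1)"
    );
}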
- fn text(&self, cache: &mut HashMap>>) -> String { + fn text( + &self, + cache: &mut HashMap, berkeley_columns::Column>>, + ) -> String { use Expr::*; match self { Double(x) => format!("double({})", x.text(cache)), Constant(x) => x.text(), Cell(v) => v.text(), - UnnormalizedLagrangeBasis(i) => format!("unnormalized_lagrange_basis({})", *i), - VanishesOnLast4Rows => "vanishes_on_last_4_rows".to_string(), + UnnormalizedLagrangeBasis(RowOffset { + zk_rows: true, + offset: i, + }) => match i.cmp(&0) { + Ordering::Greater => format!("unnormalized_lagrange_basis(zk_rows + {})", *i), + Ordering::Equal => "unnormalized_lagrange_basis(zk_rows)".to_string(), + Ordering::Less => format!("unnormalized_lagrange_basis(zk_rows - {})", (-*i)), + }, + UnnormalizedLagrangeBasis(RowOffset { + zk_rows: false, + offset: i, + }) => { + format!("unnormalized_lagrange_basis({})", *i) + } + VanishesOnZeroKnowledgeAndPreviousRows => { + "vanishes_on_zero_knowledge_and_previous_rows".to_string() + } BinOp(Op2::Add, x, y) => format!("({} + {})", x.text(cache), y.text(cache)), BinOp(Op2::Mul, x, y) => format!("({} * {})", x.text(cache), y.text(cache)), BinOp(Op2::Sub, x, y) => format!("({} - {})", x.text(cache), y.text(cache)), @@ -2612,24 +2670,34 @@ pub mod constraints { fn cache(&self, cache: &mut Cache) -> Self; } - impl ExprOps for Expr> + impl ExprOps for Expr, berkeley_columns::Column> where F: PrimeField, + Expr, berkeley_columns::Column>: std::fmt::Display, { fn two_pow(pow: u64) -> Self { - Expr::>::literal(>::two_pow(pow)) + Expr::, berkeley_columns::Column>::literal(>::two_pow(pow)) } fn two_to_limb() -> Self { - Expr::>::literal(>::two_to_limb()) + Expr::, berkeley_columns::Column>::literal(>::two_to_limb( + )) } fn two_to_2limb() -> Self { - Expr::>::literal(>::two_to_2limb()) + Expr::, berkeley_columns::Column>::literal(>::two_to_2limb( + )) } fn two_to_3limb() -> Self { - Expr::>::literal(>::two_to_3limb()) + Expr::, berkeley_columns::Column>::literal(>::two_to_3limb( + )) } fn double(&self) -> Self { @@ -2765,7 +2833,7 @@ pub mod constraints { // /// An alias for the intended usage of the expression type in constructing constraints. -pub type E = Expr>; +pub type E = Expr, berkeley_columns::Column>; /// Convenience function to create a constant as [Expr]. pub fn constant(x: F) -> E { @@ -2774,7 +2842,7 @@ pub fn constant(x: F) -> E { /// Helper function to quickly create an expression for a witness. pub fn witness(i: usize, row: CurrOrNext) -> E { - E::::cell(Column::Witness(i), row) + E::::cell(berkeley_columns::Column::Witness(i), row) } /// Same as [witness] but for the current row. @@ -2789,11 +2857,11 @@ pub fn witness_next(i: usize) -> E { /// Handy function to quickly create an expression for a gate. 
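A hedged usage sketch tying together the convenience helpers in this file (`witness_curr` above, `index` and `coeff` just below); it assumes these free functions and the `E` alias are importable from `circuits::expr` as this file defines them, and the constraint itself is a toy.

use kimchi::circuits::expr::{coeff, index, witness_curr, E};
use kimchi::circuits::gate::GateType;
use mina_curves::pasta::Fp;

fn main() {
    // Toy constraint on the current row: generic_selector * (w0 * w1 - c0 * w2).
    let constraint: E<Fp> = index(GateType::Generic)
        * (witness_curr(0) * witness_curr(1) - coeff(0) * witness_curr(2));
    // Expressions of this shape are what the ocaml/latex/text printers above render.
    drop(constraint);
}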
pub fn index(g: GateType) -> E { - E::::cell(Column::Index(g), CurrOrNext::Curr) + E::::cell(berkeley_columns::Column::Index(g), CurrOrNext::Curr) } pub fn coeff(i: usize) -> E { - E::::cell(Column::Coefficient(i), CurrOrNext::Curr) + E::::cell(berkeley_columns::Column::Coefficient(i), CurrOrNext::Curr) } /// Auto clone macro - Helps make constraints more readable @@ -2832,18 +2900,18 @@ pub mod test { use super::*; use crate::{ circuits::{ - constraints::ConstraintSystem, - expr::constraints::ExprOps, - gate::CircuitGate, - polynomials::{generic::GenericGateSpec, permutation::ZK_ROWS}, - wires::Wire, + constraints::ConstraintSystem, expr::constraints::ExprOps, gate::CircuitGate, + polynomials::generic::GenericGateSpec, wires::Wire, }, curve::KimchiCurve, prover_index::ProverIndex, }; use ark_ff::UniformRand; use mina_curves::pasta::{Fp, Pallas, Vesta}; - use poly_commitment::srs::{endos, SRS}; + use poly_commitment::{ + evaluation_proof::OpeningProof, + srs::{endos, SRS}, + }; use rand::{prelude::StdRng, SeedableRng}; use std::array; use std::sync::Arc; @@ -2893,7 +2961,7 @@ pub mod test { let srs = Arc::new(srs); let (endo_q, _endo_r) = endos::(); - ProverIndex::::create(constraint_system, endo_q, srs) + ProverIndex::>::create(constraint_system, endo_q, srs) }; let witness_cols: [_; COLUMNS] = array::from_fn(|_| DensePolynomial::zero()); @@ -2908,10 +2976,14 @@ pub mod test { joint_combiner: None, endo_coefficient: one, mds: &Vesta::sponge_params().mds, + zk_rows: 3, }, witness: &domain_evals.d8.this.w, coefficient: &index.column_evaluations.coefficients8, - vanishes_on_last_4_rows: &index.cs.precomputations().vanishes_on_last_4_rows, + vanishes_on_zero_knowledge_and_previous_rows: &index + .cs + .precomputations() + .vanishes_on_zero_knowledge_and_previous_rows, z: &domain_evals.d8.this.z, l0_1: l0_1(index.cs.domain.d1), domain: index.cs.domain, @@ -2925,7 +2997,8 @@ pub mod test { #[test] fn test_unnormalized_lagrange_basis() { - let domain = EvaluationDomains::::create(2usize.pow(10) + ZK_ROWS as usize) + let zk_rows = 3; + let domain = EvaluationDomains::::create(2usize.pow(10) + zk_rows) .expect("failed to create evaluation domain"); let rng = &mut StdRng::from_seed([17u8; 32]); diff --git a/kimchi/src/circuits/gate.rs b/kimchi/src/circuits/gate.rs index 3a61737f30..ec5b2f9377 100644 --- a/kimchi/src/circuits/gate.rs +++ b/kimchi/src/circuits/gate.rs @@ -16,6 +16,7 @@ use crate::{ use ark_ff::{bytes::ToBytes, PrimeField, SquareRootField}; use num_traits::cast::ToPrimitive; use o1_utils::hasher::CryptoDigest; +use poly_commitment::OpenProof; use serde::{Deserialize, Serialize}; use serde_with::serde_as; use std::io::{Result as IoResult, Write}; @@ -191,11 +192,11 @@ impl CircuitGate { /// # Errors /// /// Will give error if verify process returns error. 
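A hedged call-site sketch for the widened `verify` signature just below: the opening proof becomes an explicit type parameter (here the IPA `OpeningProof` from poly_commitment, as used in this diff's tests). Constructing the index and witness is out of scope; the wrapper function is hypothetical.

use kimchi::{
    circuits::{gate::CircuitGate, wires::COLUMNS},
    prover_index::ProverIndex,
};
use mina_curves::pasta::{Fp, Vesta};
use poly_commitment::evaluation_proof::OpeningProof;

// Checks a single gate; both generic arguments are now spelled out.
fn check_gate(
    gate: &CircuitGate<Fp>,
    row: usize,
    witness: &[Vec<Fp>; COLUMNS],
    index: &ProverIndex<Vesta, OpeningProof<Vesta>>,
) -> Result<(), String> {
    gate.verify::<Vesta, OpeningProof<Vesta>>(row, witness, index, &[])
}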
- pub fn verify>( + pub fn verify, OpeningProof: OpenProof>( &self, row: usize, witness: &[Vec; COLUMNS], - index: &ProverIndex, + index: &ProverIndex, public: &[F], ) -> Result<(), String> { use GateType::*; @@ -255,6 +256,7 @@ impl CircuitGate { joint_combiner: Some(F::one()), endo_coefficient: cs.endo, mds: &G::sponge_params().mds, + zk_rows: cs.zk_rows, }; // Create the argument environment for the constraints over field elements let env = ArgumentEnv::::create(argument_witness, self.coeffs.clone(), constants); diff --git a/kimchi/src/circuits/lookup/constraints.rs b/kimchi/src/circuits/lookup/constraints.rs index 067153a758..3cbf3f34ad 100644 --- a/kimchi/src/circuits/lookup/constraints.rs +++ b/kimchi/src/circuits/lookup/constraints.rs @@ -1,6 +1,7 @@ use crate::{ circuits::{ - expr::{prologue::*, Column, ConstantExpr}, + berkeley_columns::Column, + expr::{prologue::*, ConstantExpr, RowOffset}, gate::{CircuitGate, CurrOrNext}, lookup::lookups::{ JointLookup, JointLookupSpec, JointLookupValue, LocalPosition, LookupInfo, @@ -23,9 +24,6 @@ use super::runtime_tables; /// Number of constraints produced by the argument. pub const CONSTRAINTS: u32 = 7; -/// The number of random values to append to columns for zero-knowledge. -pub const ZK_ROWS: usize = 3; - /// Pad with zeroes and then add 3 random elements in the last two /// rows for zero knowledge. /// @@ -35,13 +33,15 @@ pub const ZK_ROWS: usize = 3; pub fn zk_patch( mut e: Vec, d: D, + zk_rows: usize, rng: &mut R, ) -> Evaluations> { let n = d.size(); let k = e.len(); - assert!(k <= n - ZK_ROWS); - e.extend((0..((n - ZK_ROWS) - k)).map(|_| F::zero())); - e.extend((0..ZK_ROWS).map(|_| F::rand(rng))); + let last_non_zk_row = n - zk_rows; + assert!(k <= last_non_zk_row); + e.extend((k..last_non_zk_row).map(|_| F::zero())); + e.extend((0..zk_rows).map(|_| F::rand(rng))); Evaluations::>::from_vec_and_domain(e, d) } @@ -93,6 +93,7 @@ pub fn sorted( joint_combiner: F, table_id_combiner: F, lookup_info: &LookupInfo, + zk_rows: usize, ) -> Result>, ProverError> { // We pad the lookups so that it is as if we lookup exactly // `max_lookups_per_row` in every row. 
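A standalone sketch of the padding layout that the generalized `zk_patch` above produces, using the same crates as this repo's tests (the concrete sizes are illustrative): k real entries, zeroes up to row `n - zk_rows`, then `zk_rows` random rows.

use ark_ff::{UniformRand, Zero};
use ark_poly::{EvaluationDomain, Radix2EvaluationDomain};
use mina_curves::pasta::Fp;
use rand::{prelude::StdRng, SeedableRng};

fn main() {
    let zk_rows = 3;
    let domain = Radix2EvaluationDomain::<Fp>::new(16).unwrap();
    let n = domain.size();
    let rng = &mut StdRng::from_seed([0u8; 32]);

    // 5 real entries...
    let k = 5;
    let mut e: Vec<Fp> = (0..k).map(|_| Fp::rand(rng)).collect();
    let last_non_zk_row = n - zk_rows;
    assert!(k <= last_non_zk_row);
    // ...zero padding up to the last non-zk row...
    e.extend((k..last_non_zk_row).map(|_| Fp::zero()));
    // ...and random values on the final zk_rows rows.
    e.extend((0..zk_rows).map(|_| Fp::rand(rng)));
    assert_eq!(e.len(), n);
}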
@@ -100,7 +101,7 @@ pub fn sorted( let n = d1.size(); let mut counts: HashMap<&F, usize> = HashMap::new(); - let lookup_rows = n - ZK_ROWS - 1; + let lookup_rows = n - zk_rows - 1; let by_row = lookup_info.by_row(gates); let max_lookups_per_row = lookup_info.max_per_row; @@ -238,13 +239,14 @@ pub fn aggregation( sorted: &[Evaluations>], rng: &mut R, lookup_info: &LookupInfo, + zk_rows: usize, ) -> Result>, ProverError> where R: Rng + ?Sized, F: PrimeField, { let n = d1.size(); - let lookup_rows = n - ZK_ROWS - 1; + let lookup_rows = n - zk_rows - 1; let beta1: F = F::one() + beta; let gammabeta1 = gamma * beta1; let mut lookup_aggreg = vec![F::one()]; @@ -316,11 +318,11 @@ where lookup_aggreg[i + 1] *= prev; }); - let res = zk_patch(lookup_aggreg, d1, rng); + let res = zk_patch(lookup_aggreg, d1, zk_rows, rng); // check that the final evaluation is equal to 1 if cfg!(debug_assertions) { - let final_val = res.evals[d1.size() - (ZK_ROWS + 1)]; + let final_val = res.evals[d1.size() - (zk_rows + 1)]; if final_val != F::one() { panic!("aggregation incorrect: {final_val}"); } @@ -599,14 +601,20 @@ pub fn constraints( let aggreg_equation = E::cell(Column::LookupAggreg, Next) * denominator - E::cell(Column::LookupAggreg, Curr) * numerator; - let final_lookup_row: i32 = -(ZK_ROWS as i32) - 1; + let final_lookup_row = RowOffset { + zk_rows: true, + offset: -1, + }; let mut res = vec![ - // the accumulator except for the last 4 rows + // the accumulator except for the last zk_rows+1 rows // (contains the zk-rows and the last value of the accumulator) - E::VanishesOnLast4Rows * aggreg_equation, + E::VanishesOnZeroKnowledgeAndPreviousRows * aggreg_equation, // the initial value of the accumulator - E::UnnormalizedLagrangeBasis(0) * (E::cell(Column::LookupAggreg, Curr) - E::one()), + E::UnnormalizedLagrangeBasis(RowOffset { + zk_rows: false, + offset: 0, + }) * (E::cell(Column::LookupAggreg, Curr) - E::one()), // Check that the final value of the accumulator is 1 E::UnnormalizedLagrangeBasis(final_lookup_row) * (E::cell(Column::LookupAggreg, Curr) - E::one()), @@ -620,7 +628,10 @@ pub fn constraints( final_lookup_row } else { // Check compatibility of the first elements - 0 + RowOffset { + zk_rows: false, + offset: 0, + } }; let mut expr = E::UnnormalizedLagrangeBasis(first_or_last) * (column(Column::LookupSorted(i)) - column(Column::LookupSorted(i + 1))); @@ -679,12 +690,13 @@ pub fn verify, TABLE: Fn() -> I>( table_id_combiner: &F, sorted: &[Evaluations>], lookup_info: &LookupInfo, + zk_rows: usize, ) { sorted .iter() .for_each(|s| assert_eq!(d1.size, s.domain().size)); let n = d1.size(); - let lookup_rows = n - ZK_ROWS - 1; + let lookup_rows = n - zk_rows - 1; // Check that the (desnakified) sorted table is // 1. Sorted diff --git a/kimchi/src/circuits/lookup/index.rs b/kimchi/src/circuits/lookup/index.rs index 7dfdca1bc4..ab3e4545ab 100644 --- a/kimchi/src/circuits/lookup/index.rs +++ b/kimchi/src/circuits/lookup/index.rs @@ -7,7 +7,6 @@ use crate::circuits::{ lookups::{LookupInfo, LookupPattern}, tables::LookupTable, }, - polynomials::permutation::ZK_ROWS, }; use ark_ff::{FftField, PrimeField, SquareRootField}; use ark_poly::{ @@ -204,6 +203,7 @@ impl LookupConstraintSystem { lookup_tables: Vec>, runtime_tables: Option>>, domain: &EvaluationDomains, + zk_rows: usize, ) -> Result, LookupError> { //~ 1. 
If no lookup is used in the circuit, do not create a lookup index match LookupInfo::create_from_gates(gates, runtime_tables.is_some()) { @@ -212,10 +212,10 @@ impl LookupConstraintSystem { let d1_size = domain.d1.size(); // The maximum number of entries that can be provided across all tables. - // Since we do not assert the lookup constraint on the final `ZK_ROWS` rows, and + // Since we do not assert the lookup constraint on the final `zk_rows` rows, and // because the row before is used to assert that the lookup argument's final // product is 1, we cannot use those rows to store any values. - let max_num_entries = d1_size - (ZK_ROWS as usize) - 1; + let max_num_entries = d1_size - zk_rows - 1; //~ 2. Get the lookup selectors and lookup tables (TODO: how?) let (lookup_selectors, gate_lookup_tables) = @@ -257,8 +257,8 @@ impl LookupConstraintSystem { .take(d1_size - runtime_table_offset - runtime_len), ); - // although the last ZK_ROWS are fine - for e in evals.iter_mut().rev().take(ZK_ROWS as usize) { + // although the last zk_rows are fine + for e in evals.iter_mut().rev().take(zk_rows) { *e = F::zero(); } @@ -296,7 +296,7 @@ impl LookupConstraintSystem { //~ that a lookup table can have. let max_table_width = lookup_tables .iter() - .map(|table| table.data.len()) + .map(|table| table.width()) .max() .unwrap_or(0); @@ -348,7 +348,7 @@ impl LookupConstraintSystem { let mut has_table_id_0_with_zero_entry = false; for table in &lookup_tables { - let table_len = table.data[0].len(); + let table_len = table.len(); if table.id == 0 { has_table_id_0 = true; @@ -366,6 +366,7 @@ impl LookupConstraintSystem { //~~ * Copy the entries from the table to new rows in the corresponding columns of the concatenated table. for (i, col) in table.data.iter().enumerate() { + // See GH issue: https://github.com/MinaProtocol/mina/issues/14097 if col.len() != table_len { return Err(LookupError::InconsistentTableLength); } @@ -373,7 +374,7 @@ impl LookupConstraintSystem { } //~~ * Fill in any unused columns with 0 (to match the dummy value) - for lookup_table in lookup_table.iter_mut().skip(table.data.len()) { + for lookup_table in lookup_table.iter_mut().skip(table.width()) { lookup_table.extend(repeat_n(F::zero(), table_len)); } } diff --git a/kimchi/src/circuits/lookup/lookups.rs b/kimchi/src/circuits/lookup/lookups.rs index 8d9bec6169..b126e1be00 100644 --- a/kimchi/src/circuits/lookup/lookups.rs +++ b/kimchi/src/circuits/lookup/lookups.rs @@ -42,6 +42,7 @@ fn max_lookups_per_row(kinds: LookupPatterns) -> usize { feature = "ocaml_types", derive(ocaml::IntoValue, ocaml::FromValue, ocaml_gen::Struct) )] +#[cfg_attr(feature = "wasm_types", wasm_bindgen::prelude::wasm_bindgen)] pub struct LookupPatterns { pub xor: bool, pub lookup: bool, @@ -133,6 +134,7 @@ impl LookupPatterns { feature = "ocaml_types", derive(ocaml::IntoValue, ocaml::FromValue, ocaml_gen::Struct) )] +#[cfg_attr(feature = "wasm_types", wasm_bindgen::prelude::wasm_bindgen)] pub struct LookupFeatures { /// A single lookup constraint is a vector of lookup constraints to be applied at a row. pub patterns: LookupPatterns, @@ -157,7 +159,8 @@ impl LookupFeatures { } /// Describes the desired lookup configuration. -#[derive(Clone, Serialize, Deserialize, Debug)] +#[derive(Copy, Clone, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "wasm_types", wasm_bindgen::prelude::wasm_bindgen)] pub struct LookupInfo { /// The maximum length of an element of `kinds`. This can be computed from `kinds`. 
pub max_per_row: usize, @@ -547,3 +550,58 @@ fn lookup_pattern_constants_correct() { assert_eq!((pat, pat.max_joint_size()), (pat, max_joint_size as u32)); } } + +#[cfg(feature = "wasm_types")] +pub mod wasm { + use super::*; + + #[wasm_bindgen::prelude::wasm_bindgen] + impl LookupPatterns { + #[wasm_bindgen::prelude::wasm_bindgen(constructor)] + pub fn new( + xor: bool, + lookup: bool, + range_check: bool, + foreign_field_mul: bool, + ) -> LookupPatterns { + LookupPatterns { + xor, + lookup, + range_check, + foreign_field_mul, + } + } + } + + #[wasm_bindgen::prelude::wasm_bindgen] + impl LookupFeatures { + #[wasm_bindgen::prelude::wasm_bindgen(constructor)] + pub fn new( + patterns: LookupPatterns, + joint_lookup_used: bool, + uses_runtime_tables: bool, + ) -> LookupFeatures { + LookupFeatures { + patterns, + joint_lookup_used, + uses_runtime_tables, + } + } + } + + #[wasm_bindgen::prelude::wasm_bindgen] + impl LookupInfo { + #[wasm_bindgen::prelude::wasm_bindgen(constructor)] + pub fn new( + max_per_row: usize, + max_joint_size: u32, + features: LookupFeatures, + ) -> LookupInfo { + LookupInfo { + max_per_row, + max_joint_size, + features, + } + } + } +} diff --git a/kimchi/src/circuits/lookup/runtime_tables.rs b/kimchi/src/circuits/lookup/runtime_tables.rs index 98b018ed8a..f8123d75d8 100644 --- a/kimchi/src/circuits/lookup/runtime_tables.rs +++ b/kimchi/src/circuits/lookup/runtime_tables.rs @@ -2,10 +2,7 @@ //! The setup has to prepare for their presence using [`RuntimeTableCfg`]. //! At proving time, the prover can use [`RuntimeTable`] to specify the actual tables. -use crate::circuits::{ - expr::{prologue::*, Column}, - gate::CurrOrNext, -}; +use crate::circuits::{berkeley_columns::Column, expr::prologue::*, gate::CurrOrNext}; use ark_ff::Field; use serde::{Deserialize, Serialize}; diff --git a/kimchi/src/circuits/lookup/tables/mod.rs b/kimchi/src/circuits/lookup/tables/mod.rs index 07360a4067..ac259385e7 100644 --- a/kimchi/src/circuits/lookup/tables/mod.rs +++ b/kimchi/src/circuits/lookup/tables/mod.rs @@ -51,7 +51,7 @@ where pub fn has_zero_entry(&self) -> bool { // reminder: a table is written as a list of columns, // not as a list of row entries. - for row in 0..self.data[0].len() { + for row in 0..self.len() { for col in &self.data { if !col[row].is_zero() { continue; } @@ -63,6 +63,13 @@ where false } + /// Returns the number of columns, i.e. the width of the table. + /// Using this method is less error-prone than accessing the public + /// `data` field directly. + pub fn width(&self) -> usize { + self.data.len() + } + /// Returns the length of the table.
pub fn len(&self) -> usize { self.data[0].len() diff --git a/kimchi/src/circuits/mod.rs b/kimchi/src/circuits/mod.rs index 83dbc91c63..7bad1cfd9f 100644 --- a/kimchi/src/circuits/mod.rs +++ b/kimchi/src/circuits/mod.rs @@ -2,6 +2,7 @@ pub mod macros; pub mod argument; +pub mod berkeley_columns; pub mod constraints; pub mod domain_constant_evaluation; pub mod domains; diff --git a/kimchi/src/circuits/polynomials/endosclmul.rs b/kimchi/src/circuits/polynomials/endosclmul.rs index 6dc41ba185..5a95be20b1 100644 --- a/kimchi/src/circuits/polynomials/endosclmul.rs +++ b/kimchi/src/circuits/polynomials/endosclmul.rs @@ -145,6 +145,7 @@ impl CircuitGate { joint_combiner: None, mds: &G::sponge_params().mds, endo_coefficient: cs.endo, + zk_rows: cs.zk_rows, }; let evals: ProofEvaluations> = diff --git a/kimchi/src/circuits/polynomials/foreign_field_add/witness.rs b/kimchi/src/circuits/polynomials/foreign_field_add/witness.rs index 57af9b6ff0..950f7a265d 100644 --- a/kimchi/src/circuits/polynomials/foreign_field_add/witness.rs +++ b/kimchi/src/circuits/polynomials/foreign_field_add/witness.rs @@ -185,7 +185,7 @@ fn init_ffadd_row( overflow: F, carry: F, ) { - let layout: [Vec>>; 1] = [ + let layout: [Vec>>; 1] = [ // ForeignFieldAdd row vec![ VariableCell::create("left_lo"), @@ -221,7 +221,7 @@ fn init_bound_rows( bound: &[F; 3], carry: &F, ) { - let layout: [Vec>>; 2] = [ + let layout: [Vec>>; 2] = [ vec![ // ForeignFieldAdd row VariableCell::create("result_lo"), diff --git a/kimchi/src/circuits/polynomials/foreign_field_mul/witness.rs b/kimchi/src/circuits/polynomials/foreign_field_mul/witness.rs index 50fb4d2305..2c1e0e328d 100644 --- a/kimchi/src/circuits/polynomials/foreign_field_mul/witness.rs +++ b/kimchi/src/circuits/polynomials/foreign_field_mul/witness.rs @@ -41,7 +41,7 @@ use super::circuitgates; // // so that most significant limb, q2, is in W[2][0]. // -fn create_layout() -> [Vec>>; 2] { +fn create_layout() -> [Vec>>; 2] { [ // ForeignFieldMul row vec![ diff --git a/kimchi/src/circuits/polynomials/generic.rs b/kimchi/src/circuits/polynomials/generic.rs index 6115ce4699..15496c59df 100644 --- a/kimchi/src/circuits/polynomials/generic.rs +++ b/kimchi/src/circuits/polynomials/generic.rs @@ -43,6 +43,7 @@ use crate::circuits::{ use crate::{curve::KimchiCurve, prover_index::ProverIndex}; use ark_ff::{FftField, PrimeField, Zero}; use ark_poly::univariate::DensePolynomial; +use poly_commitment::OpenProof; use std::array; use std::marker::PhantomData; @@ -307,7 +308,9 @@ pub mod testing { } } - impl> ProverIndex { + impl, OpeningProof: OpenProof> + ProverIndex + { /// Function to verify the generic polynomials with a witness. pub fn verify_generic( &self, diff --git a/kimchi/src/circuits/polynomials/permutation.rs b/kimchi/src/circuits/polynomials/permutation.rs index 8ca2057b3d..f94133c08e 100644 --- a/kimchi/src/circuits/polynomials/permutation.rs +++ b/kimchi/src/circuits/polynomials/permutation.rs @@ -57,66 +57,69 @@ use ark_poly::{ use ark_poly::{Polynomial, UVPolynomial}; use blake2::{Blake2b512, Digest}; use o1_utils::{ExtendedDensePolynomial, ExtendedEvaluations}; +use poly_commitment::OpenProof; use rand::{CryptoRng, RngCore}; use rayon::prelude::*; use std::array; /// Number of constraints produced by the argument. 
pub const CONSTRAINTS: u32 = 3; -pub const ZK_ROWS: u64 = 3; + /// Evaluates the polynomial -/// (x - w^{n - 4}) (x - w^{n - 3}) * (x - w^{n - 2}) * (x - w^{n - 1}) -pub fn eval_vanishes_on_last_4_rows(domain: D, x: F) -> F { - let w4 = domain.group_gen.pow([domain.size - (ZK_ROWS + 1)]); - let w3 = domain.group_gen * w4; - let w2 = domain.group_gen * w3; - let w1 = domain.group_gen * w2; - (x - w1) * (x - w2) * (x - w3) * (x - w4) +/// (x - w^{n - i}) * (x - w^{n - i + 1}) * ... * (x - w^{n - 1}) +pub fn eval_vanishes_on_last_n_rows(domain: D, i: u64, x: F) -> F { + if i == 0 { + return F::one(); + } + let mut term = domain.group_gen.pow([domain.size - i]); + let mut acc = x - term; + for _ in 0..i - 1 { + term *= domain.group_gen; + acc *= x - term; + } + acc } /// The polynomial -/// (x - w^{n - 4}) (x - w^{n - 3}) * (x - w^{n - 2}) * (x - w^{n - 1}) -pub fn vanishes_on_last_4_rows(domain: D) -> DensePolynomial { +/// (x - w^{n - i}) * (x - w^{n - i + 1}) * ... * (x - w^{n - 1}) +pub fn vanishes_on_last_n_rows(domain: D, i: u64) -> DensePolynomial { + let constant = |a: F| DensePolynomial::from_coefficients_slice(&[a]); + if i == 0 { + return constant(F::one()); + } let x = DensePolynomial::from_coefficients_slice(&[F::zero(), F::one()]); - let c = |a: F| DensePolynomial::from_coefficients_slice(&[a]); - let w4 = domain.group_gen.pow([domain.size - (ZK_ROWS + 1)]); - let w3 = domain.group_gen * w4; - let w2 = domain.group_gen * w3; - let w1 = domain.group_gen * w2; - &(&(&x - &c(w1)) * &(&x - &c(w2))) * &(&(&x - &c(w3)) * &(&x - &c(w4))) + let mut term = domain.group_gen.pow([domain.size - i]); + let mut acc = &x - &constant(term); + for _ in 0..i - 1 { + term *= domain.group_gen; + acc = &acc * &(&x - &constant(term)); + } + acc } /// Returns the end of the circuit, which is used for introducing zero-knowledge in the permutation polynomial -pub fn zk_w3(domain: D) -> F { - domain.group_gen.pow([domain.size - (ZK_ROWS)]) +pub fn zk_w(domain: D, zk_rows: u64) -> F { + domain.group_gen.pow([domain.size - zk_rows]) } /// Evaluates the polynomial -/// (x - w^{n - 3}) * (x - w^{n - 2}) * (x - w^{n - 1}) -pub fn eval_zk_polynomial(domain: D, x: F) -> F { - let w3 = zk_w3(domain); - let w2 = domain.group_gen * w3; - let w1 = domain.group_gen * w2; - (x - w1) * (x - w2) * (x - w3) +/// (x - w^{n - zk_rows}) * (x - w^{n - zk_rows + 1}) * (x - w^{n - 1}) +pub fn eval_permutation_vanishing_polynomial(domain: D, zk_rows: u64, x: F) -> F { + let term = domain.group_gen.pow([domain.size - zk_rows]); + (x - term) * (x - term * domain.group_gen) * (x - domain.group_gen.pow([domain.size - 1])) } -/// Computes the zero-knowledge polynomial for blinding the permutation polynomial: `(x-w^{n-k})(x-w^{n-k-1})...(x-w^n)`. 
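A hedged sanity check for the generalized helpers above (assuming `eval_vanishes_on_last_n_rows` and `vanishes_on_last_n_rows` are public at the path this diff shows): both forms vanish on exactly the last `i` rows of the domain and nowhere earlier.

use ark_ff::{Field, Zero};
use ark_poly::{EvaluationDomain, Polynomial, Radix2EvaluationDomain};
use kimchi::circuits::polynomials::permutation::{
    eval_vanishes_on_last_n_rows, vanishes_on_last_n_rows,
};
use mina_curves::pasta::Fp;

fn main() {
    let domain = Radix2EvaluationDomain::<Fp>::new(16).unwrap();
    let i = 3;
    let poly = vanishes_on_last_n_rows(domain, i);
    // Zero on each of the last i rows...
    for j in (domain.size - i)..domain.size {
        let w_j = domain.group_gen.pow([j]);
        assert!(eval_vanishes_on_last_n_rows(domain, i, w_j).is_zero());
        assert!(poly.evaluate(&w_j).is_zero());
    }
    // ...and non-zero one row earlier.
    let w = domain.group_gen.pow([domain.size - i - 1]);
    assert!(!eval_vanishes_on_last_n_rows(domain, i, w).is_zero());
}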
-/// Currently, we use k = 3 for 2 blinding factors, -/// see -pub fn zk_polynomial(domain: D) -> DensePolynomial { - let w3 = zk_w3(domain); - let w2 = domain.group_gen * w3; - let w1 = domain.group_gen * w2; - - // (x-w3)(x-w2)(x-w1) = - // x^3 - x^2(w1+w2+w3) + x(w1w2+w1w3+w2w3) - w1w2w3 - let w1w2 = w1 * w2; - DensePolynomial::from_coefficients_slice(&[ - -w1w2 * w3, // 1 - w1w2 + (w1 * w3) + (w3 * w2), // x - -w1 - w2 - w3, // x^2 - F::one(), // x^3 - ]) +/// The polynomial +/// (x - w^{n - zk_rows}) * (x - w^{n - zk_rows + 1}) * (x - w^{n - 1}) +pub fn permutation_vanishing_polynomial( + domain: D, + zk_rows: u64, +) -> DensePolynomial { + let constant = |a: F| DensePolynomial::from_coefficients_slice(&[a]); + let x = DensePolynomial::from_coefficients_slice(&[F::zero(), F::one()]); + let term = domain.group_gen.pow([domain.size - zk_rows]); + &(&(&x - &constant(term)) * &(&x - &constant(term * domain.group_gen))) + * &(&x - &constant(domain.group_gen.pow([domain.size - 1]))) } /// Shifts represent the shifts required in the permutation argument of PLONK. @@ -191,7 +194,9 @@ where } } -impl> ProverIndex { +impl, OpeningProof: OpenProof> + ProverIndex +{ /// permutation quotient poly contribution computation /// /// # Errors @@ -214,6 +219,8 @@ impl> ProverIndex { let alpha1 = alphas.next().expect("missing power of alpha"); let alpha2 = alphas.next().expect("missing power of alpha"); + let zk_rows = self.cs.zk_rows as usize; + // constant gamma in evaluation form (in domain d8) let gamma = &self.cs.precomputations().constant_1_d8.scale(gamma); @@ -275,7 +282,8 @@ impl> ProverIndex { sigmas = &sigmas * &term; } - &(&shifts - &sigmas).scale(alpha0) * &self.cs.precomputations().zkpl + &(&shifts - &sigmas).scale(alpha0) + * &self.cs.precomputations().permutation_vanishing_polynomial_l }; //~ and `bnd`: @@ -301,9 +309,9 @@ impl> ProverIndex { return Err(ProverError::Permutation("first division rest")); } - // accumulator end := (z(x) - 1) / (x - sid[n-3]) + // accumulator end := (z(x) - 1) / (x - sid[n-zk_rows]) let denominator = DensePolynomial::from_coefficients_slice(&[ - -self.cs.sid[self.cs.domain.d1.size() - 3], + -self.cs.sid[self.cs.domain.d1.size() - zk_rows], F::one(), ]); let (bnd2, res) = DenseOrSparsePolynomial::divide_with_q_and_r( @@ -335,7 +343,11 @@ impl> ProverIndex { //~ //~ $\text{scalar} \cdot \sigma_6(x)$ //~ - let zkpm_zeta = self.cs.precomputations().zkpm.evaluate(&zeta); + let zkpm_zeta = self + .cs + .precomputations() + .permutation_vanishing_polynomial_m + .evaluate(&zeta); let scalar = ConstraintSystem::::perm_scalars(e, beta, gamma, alphas, zkpm_zeta); let evals8 = &self.column_evaluations.permutation_coefficients8[PERMUTS - 1].evals; const STRIDE: usize = 8; @@ -390,7 +402,9 @@ impl ConstraintSystem { } } -impl> ProverIndex { +impl, OpeningProof: OpenProof> + ProverIndex +{ /// permutation aggregation polynomial computation /// /// # Errors @@ -409,6 +423,8 @@ impl> ProverIndex { ) -> Result, ProverError> { let n = self.cs.domain.d1.size(); + let zk_rows = self.cs.zk_rows as usize; + // only works if first element is 1 assert_eq!(self.cs.domain.d1.elements().next(), Some(F::one())); @@ -453,7 +469,7 @@ impl> ProverIndex { //~ \end{align} //~ $$ //~ - for j in 0..n - 3 { + for j in 0..n - 1 { z[j + 1] = witness .iter() .zip(self.column_evaluations.permutation_coefficients8.iter()) @@ -461,28 +477,30 @@ impl> ProverIndex { .fold(F::one(), |x, y| x * y); } - ark_ff::fields::batch_inversion::(&mut z[1..=n - 3]); + ark_ff::fields::batch_inversion::(&mut z[1..n]); 
- for j in 0..n - 3 { - let x = z[j]; - z[j + 1] *= witness - .iter() - .zip(self.cs.shift.iter()) - .map(|(w, s)| w[j] + (self.cs.sid[j] * beta * s) + gamma) - .fold(x, |z, y| z * y); + //~ We randomize the evaluations at `n - zk_rows + 1` and `n - zk_rows + 2` in order to add + //~ zero-knowledge to the protocol. + //~ + for j in 0..n - 1 { + if j != n - zk_rows && j != n - zk_rows + 1 { + let x = z[j]; + z[j + 1] *= witness + .iter() + .zip(self.cs.shift.iter()) + .map(|(w, s)| w[j] + (self.cs.sid[j] * beta * s) + gamma) + .fold(x, |z, y| z * y); + } else { + z[j + 1] = F::rand(rng); + } } - //~ If computed correctly, we should have $z(g^{n-3}) = 1$. + //~ For a valid witness, we then have $z(g^{n-zk_rows}) = 1$. //~ - if z[n - 3] != F::one() { + if z[n - zk_rows] != F::one() { return Err(ProverError::Permutation("final value")); }; - //~ Finally, randomize the last `EVAL_POINTS` evaluations $z(g^{n-2})$ and $z(g^{n-1})$, - //~ in order to add zero-knowledge to the protocol. - z[n - 2] = F::rand(rng); - z[n - 1] = F::rand(rng); - let res = Evaluations::>::from_vec_and_domain(z, self.cs.domain.d1).interpolate(); Ok(res) } diff --git a/kimchi/src/circuits/polynomials/range_check/witness.rs b/kimchi/src/circuits/polynomials/range_check/witness.rs index e6fd60987f..459c95435f 100644 --- a/kimchi/src/circuits/polynomials/range_check/witness.rs +++ b/kimchi/src/circuits/polynomials/range_check/witness.rs @@ -29,7 +29,7 @@ use o1_utils::foreign_field::BigUintForeignFieldHelpers; /// For example, we can convert the `RangeCheck0` circuit gate into /// a 64-bit lookup by adding two copy constraints to constrain /// columns 1 and 2 to zero. -fn layout() -> [Vec>>; 4] { +fn layout() -> [Vec>>; 4] { [ /* row 1, RangeCheck0 row */ range_check_0_row("v0", 0), @@ -86,7 +86,7 @@ fn layout() -> [Vec>>; 4] { pub fn range_check_0_row( limb_name: &'static str, row: usize, -) -> Vec>> { +) -> Vec>> { vec![ VariableCell::create(limb_name), /* 12-bit copies */ diff --git a/kimchi/src/circuits/polynomials/rot.rs b/kimchi/src/circuits/polynomials/rot.rs index a0bc442a24..bf0df63ab4 100644 --- a/kimchi/src/circuits/polynomials/rot.rs +++ b/kimchi/src/circuits/polynomials/rot.rs @@ -266,7 +266,7 @@ where // ROTATION WITNESS COMPUTATION -fn layout_rot64(curr_row: usize) -> [Vec>>; 3] { +fn layout_rot64(curr_row: usize) -> [Vec>>; 3] { [ rot_row(), range_check_0_row("shifted", curr_row + 1), @@ -274,7 +274,7 @@ fn layout_rot64(curr_row: usize) -> [Vec() -> Vec>> { +fn rot_row() -> Vec>> { vec![ VariableCell::create("word"), VariableCell::create("rotated"), @@ -328,8 +328,8 @@ pub fn extend_rot( rot: u32, side: RotMode, ) { - assert!(rot < 64, "Rotation value must be less than 64"); - assert_ne!(rot, 0, "Rotation value must be non-zero"); + assert!(rot <= 64, "Rotation value must be less than or equal to 64"); + let rot = if side == RotMode::Right { 64 - rot } else { @@ -343,8 +343,8 @@ pub fn extend_rot( // shifted [------] * 2^rot // rot = [------|000] // + [---] excess - let shifted = (word as u128 * 2u128.pow(rot) % 2u128.pow(64)) as u64; - let excess = word / 2u64.pow(64 - rot); + let shifted = (word as u128) * 2u128.pow(rot) % 2u128.pow(64); + let excess = (word as u128) / 2u128.pow(64 - rot); let rotated = shifted + excess; // Value for the added value for the bound // Right input of the "FFAdd" for the bound equation diff --git a/kimchi/src/circuits/polynomials/turshi.rs b/kimchi/src/circuits/polynomials/turshi.rs index f038b24aa0..645e5649c1 100644 --- a/kimchi/src/circuits/polynomials/turshi.rs +++ 
b/kimchi/src/circuits/polynomials/turshi.rs @@ -82,8 +82,9 @@ use crate::{ alphas::Alphas, circuits::{ argument::{Argument, ArgumentEnv, ArgumentType}, + berkeley_columns::Column, constraints::ConstraintSystem, - expr::{self, constraints::ExprOps, Cache, Column, E}, + expr::{self, constraints::ExprOps, Cache, E}, gate::{CircuitGate, GateType}, wires::{GateWires, Wire, COLUMNS}, }, @@ -226,6 +227,7 @@ impl CircuitGate { joint_combiner: None, endo_coefficient: cs.endo, mds: &G::sponge_params().mds, + zk_rows: 3, }; let pt = F::rand(rng); diff --git a/kimchi/src/circuits/polynomials/varbasemul.rs b/kimchi/src/circuits/polynomials/varbasemul.rs index 22b9522195..c211fb1ef8 100644 --- a/kimchi/src/circuits/polynomials/varbasemul.rs +++ b/kimchi/src/circuits/polynomials/varbasemul.rs @@ -12,7 +12,8 @@ use crate::circuits::{ argument::{Argument, ArgumentEnv, ArgumentType}, - expr::{constraints::ExprOps, Cache, Column, Variable}, + berkeley_columns::Column, + expr::{constraints::ExprOps, Cache, Variable as VariableGen}, gate::{CircuitGate, CurrOrNext, GateType}, wires::{GateWires, COLUMNS}, }; @@ -20,6 +21,8 @@ use ark_ff::{FftField, PrimeField}; use std::marker::PhantomData; use CurrOrNext::{Curr, Next}; +type Variable = VariableGen; + //~ We implement custom Plonk constraints for short Weierstrass curve variable base scalar multiplication. //~ //~ Given a finite field $\mathbb{F}_q$ of order $q$, if the order is not a multiple of 2 nor 3, then an diff --git a/kimchi/src/circuits/polynomials/xor.rs b/kimchi/src/circuits/polynomials/xor.rs index a7913f92a7..6750a511bc 100644 --- a/kimchi/src/circuits/polynomials/xor.rs +++ b/kimchi/src/circuits/polynomials/xor.rs @@ -169,10 +169,7 @@ where } // Witness layout -fn layout( - curr_row: usize, - bits: usize, -) -> Vec>>> { +fn layout(curr_row: usize, bits: usize) -> Vec>>> { let num_xor = num_xors(bits); let mut layout = (0..num_xor) .map(|i| xor_row(i, curr_row + i)) @@ -181,10 +178,7 @@ fn layout( layout } -fn xor_row( - nybble: usize, - curr_row: usize, -) -> Vec>> { +fn xor_row(nybble: usize, curr_row: usize) -> Vec>> { let start = nybble * 16; vec![ VariableBitsCell::create("in1", start, None), @@ -205,7 +199,7 @@ fn xor_row( ] } -fn zero_row() -> Vec>> { +fn zero_row() -> Vec>> { vec![ ConstantCell::create(F::zero()), ConstantCell::create(F::zero()), diff --git a/kimchi/src/circuits/witness/constant_cell.rs b/kimchi/src/circuits/witness/constant_cell.rs index ea14b5de8c..cbc7aaa7c8 100644 --- a/kimchi/src/circuits/witness/constant_cell.rs +++ b/kimchi/src/circuits/witness/constant_cell.rs @@ -13,7 +13,7 @@ impl ConstantCell { } } -impl WitnessCell for ConstantCell { +impl WitnessCell for ConstantCell { fn value(&self, _witness: &mut [Vec; W], _variables: &Variables, _index: usize) -> F { self.value } diff --git a/kimchi/src/circuits/witness/copy_bits_cell.rs b/kimchi/src/circuits/witness/copy_bits_cell.rs index 964e2f26bf..a61ca183a8 100644 --- a/kimchi/src/circuits/witness/copy_bits_cell.rs +++ b/kimchi/src/circuits/witness/copy_bits_cell.rs @@ -23,7 +23,7 @@ impl CopyBitsCell { } } -impl WitnessCell for CopyBitsCell { +impl WitnessCell for CopyBitsCell { fn value(&self, witness: &mut [Vec; W], _variables: &Variables, _index: usize) -> F { F::from_bits(&witness[self.col][self.row].to_bits()[self.start..self.end]) .expect("failed to deserialize field bits for copy bits cell") diff --git a/kimchi/src/circuits/witness/copy_cell.rs b/kimchi/src/circuits/witness/copy_cell.rs index ffa8339094..d3fad71654 100644 --- 
a/kimchi/src/circuits/witness/copy_cell.rs +++ b/kimchi/src/circuits/witness/copy_cell.rs @@ -15,7 +15,7 @@ impl CopyCell { } } -impl WitnessCell for CopyCell { +impl WitnessCell for CopyCell { fn value(&self, witness: &mut [Vec; W], _variables: &Variables, _index: usize) -> F { witness[self.col][self.row] } diff --git a/kimchi/src/circuits/witness/copy_shift_cell.rs b/kimchi/src/circuits/witness/copy_shift_cell.rs index b0ed5d055a..b0c87587d1 100644 --- a/kimchi/src/circuits/witness/copy_shift_cell.rs +++ b/kimchi/src/circuits/witness/copy_shift_cell.rs @@ -15,7 +15,7 @@ impl CopyShiftCell { } } -impl WitnessCell for CopyShiftCell { +impl WitnessCell for CopyShiftCell { fn value(&self, witness: &mut [Vec; W], _variables: &Variables, _index: usize) -> F { F::from(2u32).pow([self.shift]) * witness[self.col][self.row] } diff --git a/kimchi/src/circuits/witness/index_cell.rs b/kimchi/src/circuits/witness/index_cell.rs index 9d6ebefea5..fd2628a316 100644 --- a/kimchi/src/circuits/witness/index_cell.rs +++ b/kimchi/src/circuits/witness/index_cell.rs @@ -18,7 +18,7 @@ impl<'a> IndexCell<'a> { } } -impl<'a, const W: usize, F: Field> WitnessCell> for IndexCell<'a> { +impl<'a, F: Field, const W: usize> WitnessCell, W> for IndexCell<'a> { fn value(&self, _witness: &mut [Vec; W], variables: &Variables>, index: usize) -> F { assert!(index < self.length, "index out of bounds of `IndexCell`"); variables[self.name][index] diff --git a/kimchi/src/circuits/witness/mod.rs b/kimchi/src/circuits/witness/mod.rs index 8dad3a2a40..830e2af5e7 100644 --- a/kimchi/src/circuits/witness/mod.rs +++ b/kimchi/src/circuits/witness/mod.rs @@ -20,10 +20,13 @@ pub use self::{ variables::{variable_map, variables, Variables}, }; -/// Witness cell interface -pub trait WitnessCell { +use super::polynomial::COLUMNS; + +/// Witness cell interface. By default, the witness cell is a single element of type F. 
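The trait just below now carries the width `W` as a const parameter with `COLUMNS` as its default. A hedged sketch of a custom cell under the new `<F, T, W>` parameter order follows; the `DoubleCell` type is hypothetical, and it assumes `WitnessCell` and `Variables` are importable from this module as its re-exports suggest.

use ark_ff::Field;
use kimchi::circuits::witness::{Variables, WitnessCell};

// Hypothetical cell that reads an earlier witness entry and doubles it.
pub struct DoubleCell {
    row: usize,
    col: usize,
}

impl<F: Field, const W: usize> WitnessCell<F, F, W> for DoubleCell {
    fn value(&self, witness: &mut [Vec<F>; W], _variables: &Variables<F>, _index: usize) -> F {
        witness[self.col][self.row].double()
    }
}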
+pub trait WitnessCell { fn value(&self, witness: &mut [Vec; W], variables: &Variables, index: usize) -> F; + // Length is 1 by default (T is single F element) unless overridden fn length(&self) -> usize { 1 } @@ -40,25 +43,25 @@ pub trait WitnessCell { /// - layout: the partial layout to initialize from /// - variables: the hashmap of variables to get the values from #[allow(clippy::too_many_arguments)] -pub fn init_cell( +pub fn init_cell( witness: &mut [Vec; W], offset: usize, row: usize, col: usize, cell: usize, index: usize, - layout: &[Vec>>], + layout: &[Vec>>], variables: &Variables, ) { witness[col][row + offset] = layout[row][cell].value(witness, variables, index); } /// Initialize a witness row based on layout and computed variables -pub fn init_row( +pub fn init_row( witness: &mut [Vec; W], offset: usize, row: usize, - layout: &[Vec>>], + layout: &[Vec>>], variables: &Variables, ) { let mut col = 0; @@ -72,10 +75,10 @@ pub fn init_row( } /// Initialize a witness based on layout and computed variables -pub fn init( +pub fn init( witness: &mut [Vec; W], offset: usize, - layout: &[Vec>>], + layout: &[Vec>>], variables: &Variables, ) { for row in 0..layout.len() { @@ -97,7 +100,7 @@ mod tests { #[test] fn zero_layout() { - let layout: Vec>>> = vec![vec![ + let layout: Vec>>> = vec![vec![ ConstantCell::create(PallasField::zero()), ConstantCell::create(PallasField::zero()), ConstantCell::create(PallasField::zero()), @@ -140,7 +143,7 @@ mod tests { #[test] fn mixed_layout() { - let layout: Vec>>> = vec![ + let layout: Vec>>> = vec![ vec![ ConstantCell::create(PallasField::from(12u32)), ConstantCell::create(PallasField::from(0xa5a3u32)), diff --git a/kimchi/src/circuits/witness/variable_bits_cell.rs b/kimchi/src/circuits/witness/variable_bits_cell.rs index 1fef513607..584920592d 100644 --- a/kimchi/src/circuits/witness/variable_bits_cell.rs +++ b/kimchi/src/circuits/witness/variable_bits_cell.rs @@ -18,7 +18,7 @@ impl<'a> VariableBitsCell<'a> { } } -impl<'a, const W: usize, F: Field> WitnessCell for VariableBitsCell<'a> { +impl<'a, F: Field, const W: usize> WitnessCell for VariableBitsCell<'a> { fn value(&self, _witness: &mut [Vec; W], variables: &Variables, _index: usize) -> F { let bits = if let Some(end) = self.end { F::from_bits(&variables[self.name].to_bits()[self.start..end]) diff --git a/kimchi/src/circuits/witness/variable_cell.rs b/kimchi/src/circuits/witness/variable_cell.rs index c24ce57d42..fcb6ee9f21 100644 --- a/kimchi/src/circuits/witness/variable_cell.rs +++ b/kimchi/src/circuits/witness/variable_cell.rs @@ -14,7 +14,7 @@ impl<'a> VariableCell<'a> { } } -impl<'a, const W: usize, F: Field> WitnessCell for VariableCell<'a> { +impl<'a, F: Field, const W: usize> WitnessCell for VariableCell<'a> { fn value(&self, _witness: &mut [Vec; W], variables: &Variables, _index: usize) -> F { variables[self.name] } diff --git a/kimchi/src/curve.rs b/kimchi/src/curve.rs index 981cc0b5f4..57790b10f7 100644 --- a/kimchi/src/curve.rs +++ b/kimchi/src/curve.rs @@ -1,68 +1,112 @@ //! This module contains a useful trait for recursion: [KimchiCurve], //! which defines how a pair of curves interact. 
-use ark_ec::{short_weierstrass_jacobian::GroupAffine, ModelParameters}; +use ark_ec::{short_weierstrass_jacobian::GroupAffine, AffineCurve, ModelParameters}; use mina_curves::pasta::curves::{ pallas::{LegacyPallasParameters, PallasParameters}, vesta::{LegacyVestaParameters, VestaParameters}, }; use mina_poseidon::poseidon::ArithmeticSpongeParams; use once_cell::sync::Lazy; -use poly_commitment::{commitment::CommitmentCurve, srs::endos}; +use poly_commitment::{ + commitment::{CommitmentCurve, EndoCurve}, + srs::endos, +}; /// Represents additional information that a curve needs in order to be used with Kimchi -pub trait KimchiCurve: CommitmentCurve { +pub trait KimchiCurve: CommitmentCurve + EndoCurve { /// A human readable name. const NAME: &'static str; - /// The other curve that forms the cycle used for recursion. - type OtherCurve: KimchiCurve< - ScalarField = Self::BaseField, - BaseField = Self::ScalarField, - OtherCurve = Self, - >; - /// Provides the sponge params to be used with this curve. - /// If the params for the base field are needed, they can be obtained from [`KimchiCurve::OtherCurve`]. fn sponge_params() -> &'static ArithmeticSpongeParams; - /// Provides the coefficients for the curve endomorphism called (q,r) in some places. + /// Provides the sponge params to be used with the other curve. + fn other_curve_sponge_params() -> &'static ArithmeticSpongeParams; + + /// Provides the coefficients for the curve endomorphism, called (q,r) in some places. fn endos() -> &'static (Self::BaseField, Self::ScalarField); + + /// Provides the coefficient for the curve endomorphism over the other field, called q in some + /// places. + fn other_curve_endo() -> &'static Self::ScalarField; + + /// Accessor for the other curve's prime subgroup generator, as coordinates + // TODO: This leaked from snarky.rs. Stop the bleed. 
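A hedged sketch of what the new accessors buy for recursion (assuming the `Vesta`/`Pallas` aliases from mina_curves): one curve's other-curve sponge parameters are its cycle partner's native ones, so an Fq-sponge can be configured without ever naming the second curve.

use kimchi::curve::KimchiCurve;
use mina_curves::pasta::{Pallas, Vesta};

fn main() {
    // Vesta's scalar field is Fp; its base field Fq is Pallas' scalar field,
    // so the two accessors below should hand back the same static parameters.
    let via_vesta = Vesta::other_curve_sponge_params();
    let native = Pallas::sponge_params();
    assert!(std::ptr::eq(via_vesta, native));
}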
+ fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField); +} + +fn vesta_endos() -> &'static ( + ::BaseField, + ::ScalarField, +) { + static VESTA_ENDOS: Lazy<( + ::BaseField, + ::ScalarField, + )> = Lazy::new(endos::>); + &VESTA_ENDOS +} + +fn pallas_endos() -> &'static ( + ::BaseField, + ::ScalarField, +) { + static PALLAS_ENDOS: Lazy<( + ::BaseField, + ::ScalarField, + )> = Lazy::new(endos::>); + &PALLAS_ENDOS } impl KimchiCurve for GroupAffine { const NAME: &'static str = "vesta"; - type OtherCurve = GroupAffine; - fn sponge_params() -> &'static ArithmeticSpongeParams { mina_poseidon::pasta::fp_kimchi::static_params() } + fn other_curve_sponge_params() -> &'static ArithmeticSpongeParams { + mina_poseidon::pasta::fq_kimchi::static_params() + } + fn endos() -> &'static (Self::BaseField, Self::ScalarField) { - static VESTA_ENDOS: Lazy<( - ::BaseField, - ::ScalarField, - )> = Lazy::new(endos::>); - &VESTA_ENDOS + vesta_endos() + } + + fn other_curve_endo() -> &'static Self::ScalarField { + &pallas_endos().0 + } + + fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { + GroupAffine::::prime_subgroup_generator() + .to_coordinates() + .unwrap() } } impl KimchiCurve for GroupAffine { const NAME: &'static str = "pallas"; - type OtherCurve = GroupAffine; - fn sponge_params() -> &'static ArithmeticSpongeParams { mina_poseidon::pasta::fq_kimchi::static_params() } + fn other_curve_sponge_params() -> &'static ArithmeticSpongeParams { + mina_poseidon::pasta::fp_kimchi::static_params() + } + fn endos() -> &'static (Self::BaseField, Self::ScalarField) { - static PALLAS_ENDOS: Lazy<( - ::BaseField, - ::ScalarField, - )> = Lazy::new(endos::>); - &PALLAS_ENDOS + pallas_endos() + } + + fn other_curve_endo() -> &'static Self::ScalarField { + &vesta_endos().0 + } + + fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { + GroupAffine::::prime_subgroup_generator() + .to_coordinates() + .unwrap() } } @@ -73,27 +117,88 @@ impl KimchiCurve for GroupAffine { impl KimchiCurve for GroupAffine { const NAME: &'static str = "legacy_vesta"; - type OtherCurve = GroupAffine; - fn sponge_params() -> &'static ArithmeticSpongeParams { mina_poseidon::pasta::fp_legacy::static_params() } + fn other_curve_sponge_params() -> &'static ArithmeticSpongeParams { + mina_poseidon::pasta::fq_legacy::static_params() + } + fn endos() -> &'static (Self::BaseField, Self::ScalarField) { - GroupAffine::::endos() + vesta_endos() + } + + fn other_curve_endo() -> &'static Self::ScalarField { + &pallas_endos().0 + } + + fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { + GroupAffine::::prime_subgroup_generator() + .to_coordinates() + .unwrap() } } impl KimchiCurve for GroupAffine { const NAME: &'static str = "legacy_pallas"; - type OtherCurve = GroupAffine; - fn sponge_params() -> &'static ArithmeticSpongeParams { mina_poseidon::pasta::fq_legacy::static_params() } + fn other_curve_sponge_params() -> &'static ArithmeticSpongeParams { + mina_poseidon::pasta::fp_legacy::static_params() + } + + fn endos() -> &'static (Self::BaseField, Self::ScalarField) { + pallas_endos() + } + + fn other_curve_endo() -> &'static Self::ScalarField { + &vesta_endos().0 + } + + fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { + GroupAffine::::prime_subgroup_generator() + .to_coordinates() + .unwrap() + } +} + +#[cfg(feature = "bn254")] +use mina_poseidon::dummy_values::kimchi_dummy; + +#[cfg(feature = 
"bn254")] +impl KimchiCurve for GroupAffine { + const NAME: &'static str = "bn254"; + + fn sponge_params() -> &'static ArithmeticSpongeParams { + // TODO: Generate some params + static PARAMS: Lazy> = Lazy::new(kimchi_dummy); + &PARAMS + } + + fn other_curve_sponge_params() -> &'static ArithmeticSpongeParams { + // TODO: Generate some params + static PARAMS: Lazy> = Lazy::new(kimchi_dummy); + &PARAMS + } + fn endos() -> &'static (Self::BaseField, Self::ScalarField) { - GroupAffine::::endos() + static ENDOS: Lazy<(ark_bn254::Fq, ark_bn254::Fr)> = + Lazy::new(endos::); + &ENDOS + } + + fn other_curve_endo() -> &'static Self::ScalarField { + // TODO: Dummy value, this is definitely not right + static ENDO: Lazy = Lazy::new(|| 13u64.into()); + &ENDO + } + + fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { + // TODO: Dummy value, this is definitely not right + (44u64.into(), 88u64.into()) } } diff --git a/kimchi/src/error.rs b/kimchi/src/error.rs index 37e2f29638..3c0b352eb7 100644 --- a/kimchi/src/error.rs +++ b/kimchi/src/error.rs @@ -10,6 +10,11 @@ pub enum ProverError { #[error("the circuit is too large")] NoRoomForZkInWitness, + #[error( + "there are not enough random rows to achieve zero-knowledge (expected: {0}, got: {1})" + )] + NotZeroKnowledge(usize, usize), + #[error("the witness columns are not all the same size")] WitnessCsInconsistent, @@ -22,9 +27,6 @@ pub enum ProverError { #[error("the lookup failed to find a match in the table: row={0}")] ValueNotInTable(usize), - #[error("SRS size is smaller than the domain size required by the circuit")] - SRSTooSmall, - #[error("the runtime tables provided did not match the index's configuration")] RuntimeTablesInconsistent, @@ -35,8 +37,8 @@ pub enum ProverError { /// Errors that can arise when verifying a proof #[derive(Error, Debug, Clone, Copy)] pub enum VerifyError { - #[error("the commitment to {0} is of an unexpected size")] - IncorrectCommitmentLength(&'static str), + #[error("the commitment to {0} is of an unexpected size (expected {1}, got {2})")] + IncorrectCommitmentLength(&'static str, usize, usize), #[error("the public input is of an unexpected size (expected {0})")] IncorrectPubicInputLength(usize), @@ -44,8 +46,10 @@ pub enum VerifyError { #[error("the previous challenges have an unexpected length (expected {0}, got {1})")] IncorrectPrevChallengesLength(usize, usize), - #[error("proof malformed: an evaluation was of the incorrect size (all evaluations are expected to be of length 1)")] - IncorrectEvaluationsLength, + #[error( + "proof malformed: an evaluation for {2} was of the incorrect size (expected {0}, got {1})" + )] + IncorrectEvaluationsLength(usize, usize, &'static str), #[error("the opening proof failed to verify")] OpenProof, @@ -69,10 +73,23 @@ pub enum VerifyError { IncorrectRuntimeProof, #[error("the evaluation for {0:?} is missing")] - MissingEvaluation(crate::circuits::expr::Column), + MissingEvaluation(crate::circuits::berkeley_columns::Column), + + #[error("the evaluation for PublicInput is missing")] + MissingPublicInputEvaluation, #[error("the commitment for {0:?} is missing")] - MissingCommitment(crate::circuits::expr::Column), + MissingCommitment(crate::circuits::berkeley_columns::Column), +} + +/// Errors that can arise when preparing the setup +#[derive(Error, Debug, Clone)] +pub enum DomainCreationError { + #[error("could not compute the size of domain for {0}")] + DomainSizeFailed(usize), + + #[error("construction of domain {0} for size {1} failed")] + 
DomainConstructionFailed(String, usize), } /// Errors that can arise when preparing the setup @@ -82,7 +99,7 @@ pub enum SetupError { ConstraintSystem(String), #[error("the domain could not be constructed: {0}")] - DomainCreation(&'static str), + DomainCreation(DomainCreationError), } /// Errors that can arise when creating a verifier index diff --git a/kimchi/src/lagrange_basis_evaluations.rs b/kimchi/src/lagrange_basis_evaluations.rs index 157fb5af2a..ee825a0ae1 100644 --- a/kimchi/src/lagrange_basis_evaluations.rs +++ b/kimchi/src/lagrange_basis_evaluations.rs @@ -5,39 +5,49 @@ use rayon::prelude::*; /// The evaluations of all normalized lagrange basis polynomials at a given /// point. Can be used to evaluate an `Evaluations` form polynomial at that point. pub struct LagrangeBasisEvaluations { - pub evals: Vec, + evals: Vec>, } impl LagrangeBasisEvaluations { /// Given the evaluations form of a polynomial, directly evaluate that polynomial at a point. - pub fn evaluate>(&self, p: &Evaluations) -> F { - assert_eq!(p.evals.len() % self.evals.len(), 0); - let stride = p.evals.len() / self.evals.len(); + pub fn evaluate>(&self, p: &Evaluations) -> Vec { + assert_eq!(p.evals.len() % self.evals[0].len(), 0); + let stride = p.evals.len() / self.evals[0].len(); let p_evals = &p.evals; (&self.evals) .into_par_iter() - .enumerate() - .map(|(i, e)| p_evals[stride * i] * e) - .sum() + .map(|evals| { + evals + .into_par_iter() + .enumerate() + .map(|(i, e)| p_evals[stride * i] * e) + .sum() + }) + .collect() } /// Given the evaluations form of a polynomial, directly evaluate that polynomial at a point, /// assuming that the given evaluations are either 0 or 1 at every point of the domain. - pub fn evaluate_boolean>(&self, p: &Evaluations) -> F { - assert_eq!(p.evals.len() % self.evals.len(), 0); - let stride = p.evals.len() / self.evals.len(); - let mut result = F::zero(); - for (i, e) in self.evals.iter().enumerate() { - if !p.evals[stride * i].is_zero() { - result += e; - } - } - result + pub fn evaluate_boolean>(&self, p: &Evaluations) -> Vec { + assert_eq!(p.evals.len() % self.evals[0].len(), 0); + let stride = p.evals.len() / self.evals[0].len(); + self.evals + .iter() + .map(|evals| { + let mut result = F::zero(); + for (i, e) in evals.iter().enumerate() { + if !p.evals[stride * i].is_zero() { + result += e; + } + } + result + }) + .collect() } /// Compute all evaluations of the normalized lagrange basis polynomials of the /// given domain at the given point. Runs in time O(domain size). - pub fn new(domain: D, x: F) -> LagrangeBasisEvaluations { + fn new_with_segment_size_1(domain: D, x: F) -> LagrangeBasisEvaluations { let n = domain.size(); // We want to compute for all i // s_i = 1 / t_i @@ -98,7 +108,40 @@ impl LagrangeBasisEvaluations { // Denominators now contains the desired result. LagrangeBasisEvaluations { - evals: denominators, + evals: vec![denominators], + } + } + + /// Compute all evaluations of the normalized lagrange basis polynomials of the + /// given domain at the given point. Runs in time O(n log(n)) for n = domain size. 
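Behind both constructors (segment size 1 above, chunked below) sits the same identity: in evaluation form, p(x) = sum_i p(w^i) * l_i(x), where the normalized Lagrange basis over a multiplicative subgroup is l_i(x) = w^i (x^n - 1) / (n (x - w^i)). A hedged, direct O(n^2)-style check of that identity, using the crates this file's tests already use:

use ark_ff::{Field, One, UniformRand, Zero};
use ark_poly::{EvaluationDomain, Evaluations, Polynomial, Radix2EvaluationDomain};
use mina_curves::pasta::Fp;
use rand::{prelude::StdRng, SeedableRng};

fn main() {
    let n = 8;
    let rng = &mut StdRng::from_seed([0u8; 32]);
    let domain = Radix2EvaluationDomain::<Fp>::new(n).unwrap();
    let evals: Vec<Fp> = (0..n).map(|_| Fp::rand(rng)).collect();
    let x = Fp::rand(rng);

    // Reference: interpolate, then evaluate the coefficient form.
    let direct = Evaluations::from_vec_and_domain(evals.clone(), domain)
        .interpolate()
        .evaluate(&x);

    // Inner product with the normalized Lagrange basis evaluations.
    let n_inv = Fp::from(n as u64).inverse().unwrap();
    let vanishing = x.pow([n as u64]) - Fp::one();
    let mut via_basis = Fp::zero();
    for (i, e) in evals.iter().enumerate() {
        let w_i = domain.group_gen.pow([i as u64]);
        via_basis += *e * vanishing * w_i * n_inv * (x - w_i).inverse().unwrap();
    }
    assert_eq!(direct, via_basis);
}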
+ fn new_with_chunked_segments( + max_poly_size: usize, + domain: D, + x: F, + ) -> LagrangeBasisEvaluations { + let n = domain.size(); + let num_chunks = n / max_poly_size; + let mut evals = Vec::with_capacity(num_chunks); + for i in 0..num_chunks { + let mut x_pow = F::one(); + let mut chunked_evals = vec![F::zero(); n]; + for j in 0..max_poly_size { + chunked_evals[i * max_poly_size + j] = x_pow; + x_pow *= x; + } + // This uses the same trick as `poly_commitment::srs::SRS::add_lagrange_basis`, but + // applied to field elements instead of group elements. + domain.ifft_in_place(&mut chunked_evals); + evals.push(chunked_evals); + } + LagrangeBasisEvaluations { evals } + } + + pub fn new(max_poly_size: usize, domain: D, x: F) -> LagrangeBasisEvaluations { + if domain.size() <= max_poly_size { + Self::new_with_segment_size_1(domain, x) + } else { + Self::new_with_chunked_segments(max_poly_size, domain, x) } } } @@ -118,19 +161,44 @@ mod tests { let domain = Radix2EvaluationDomain::new(n).unwrap(); let rng = &mut StdRng::from_seed([0u8; 32]); let x = Fp::rand(rng); - let evaluator = LagrangeBasisEvaluations::new(domain, x); + let evaluator = LagrangeBasisEvaluations::new(domain.size(), domain, x); let expected = (0..n).map(|i| { let mut lagrange_i = vec![Fp::zero(); n]; lagrange_i[i] = Fp::one(); - Evaluations::from_vec_and_domain(lagrange_i, domain) + vec![Evaluations::from_vec_and_domain(lagrange_i, domain) .interpolate() - .evaluate(&x) + .evaluate(&x)] }); - for (i, expected) in expected.enumerate() { - if evaluator.evals[i] != expected { - panic!("{}, {}: {} != {}", line!(), i, evaluator.evals[i], expected); + for (i, (expected, got)) in expected.zip(evaluator.evals).enumerate() { + for (j, (expected, got)) in expected.iter().zip(got.iter()).enumerate() { + if got != expected { + panic!("{}, {}, {}: {} != {}", line!(), i, j, got, expected); + } + } + } + } + + #[test] + fn test_new_with_chunked_segments() { + let n = 1 << 4; + let domain = Radix2EvaluationDomain::new(n).unwrap(); + let rng = &mut StdRng::from_seed([0u8; 32]); + let x = Fp::rand(rng); + let evaluator = LagrangeBasisEvaluations::new(domain.size(), domain, x); + let evaluator_chunked = + LagrangeBasisEvaluations::new_with_chunked_segments(domain.size(), domain, x); + for (i, (evals, evals_chunked)) in evaluator + .evals + .iter() + .zip(evaluator_chunked.evals.iter()) + .enumerate() + { + for (j, (evals, evals_chunked)) in evals.iter().zip(evals_chunked.iter()).enumerate() { + if evals != evals_chunked { + panic!("{}, {}, {}: {} != {}", line!(), i, j, evals, evals_chunked); + } } } } @@ -151,10 +219,10 @@ mod tests { let x = Fp::rand(rng); - let evaluator = LagrangeBasisEvaluations::new(domain, x); + let evaluator = LagrangeBasisEvaluations::new(domain.size(), domain, x); let y = evaluator.evaluate(&evals); - let expected = evals.interpolate().evaluate(&x); + let expected = vec![evals.interpolate().evaluate(&x)]; assert_eq!(y, expected) } @@ -179,10 +247,10 @@ mod tests { let x = Fp::rand(rng); - let evaluator = LagrangeBasisEvaluations::new(domain, x); + let evaluator = LagrangeBasisEvaluations::new(domain.size(), domain, x); let y = evaluator.evaluate_boolean(&evals); - let expected = evals.interpolate().evaluate(&x); + let expected = vec![evals.interpolate().evaluate(&x)]; assert_eq!(y, expected) } } diff --git a/kimchi/src/linearization.rs b/kimchi/src/linearization.rs index ec42689059..65ffe41284 100644 --- a/kimchi/src/linearization.rs +++ b/kimchi/src/linearization.rs @@ -6,7 +6,7 @@ use crate::circuits::expr; use 
crate::circuits::lookup; use crate::circuits::lookup::{ constraints::LookupConfiguration, - lookups::{LookupFeatures, LookupInfo, LookupPatterns}, + lookups::{LookupFeatures, LookupInfo, LookupPattern, LookupPatterns}, }; use crate::circuits::polynomials::{ complete_add::CompleteAdd, @@ -23,8 +23,9 @@ use crate::circuits::polynomials::{ }; use crate::circuits::{ + berkeley_columns::Column, constraints::FeatureFlags, - expr::{Column, ConstantExpr, Expr, FeatureFlag, Linearization, PolishToken}, + expr::{ConstantExpr, Expr, FeatureFlag, Linearization, PolishToken}, gate::GateType, wires::COLUMNS, }; @@ -38,7 +39,7 @@ use ark_ff::{FftField, PrimeField, SquareRootField, Zero}; pub fn constraints_expr( feature_flags: Option<&FeatureFlags>, generic: bool, -) -> (Expr>, Alphas) { +) -> (Expr, Column>, Alphas) { // register powers of alpha so that we don't reuse them across mutually inclusive constraints let mut powers_of_alpha = Alphas::::default(); @@ -305,6 +306,26 @@ pub fn linearization_columns( // the generic selector polynomial h.insert(Index(GateType::Generic)); + h.insert(Index(GateType::CompleteAdd)); + h.insert(Index(GateType::VarBaseMul)); + h.insert(Index(GateType::EndoMul)); + h.insert(Index(GateType::EndoMulScalar)); + + // optional columns + h.insert(Index(GateType::RangeCheck0)); + h.insert(Index(GateType::RangeCheck1)); + h.insert(Index(GateType::ForeignFieldAdd)); + h.insert(Index(GateType::ForeignFieldMul)); + h.insert(Index(GateType::Xor16)); + h.insert(Index(GateType::Rot64)); + + // lookup selectors + h.insert(LookupRuntimeSelector); + h.insert(LookupKindIndex(LookupPattern::Xor)); + h.insert(LookupKindIndex(LookupPattern::Lookup)); + h.insert(LookupKindIndex(LookupPattern::RangeCheck)); + h.insert(LookupKindIndex(LookupPattern::ForeignFieldMul)); + h } @@ -316,10 +337,14 @@ pub fn linearization_columns( /// # Panics /// /// Will panic if the `linearization` process fails. 
+#[allow(clippy::type_complexity)] pub fn expr_linearization( feature_flags: Option<&FeatureFlags>, generic: bool, -) -> (Linearization>>, Alphas) { +) -> ( + Linearization>, Column>, + Alphas, +) { let evaluated_cols = linearization_columns::(feature_flags); let (expr, powers_of_alpha) = constraints_expr(feature_flags, generic); @@ -329,5 +354,7 @@ pub fn expr_linearization( .unwrap() .map(|e| e.to_polish()); + assert_eq!(linearization.index_terms.len(), 0); + (linearization, powers_of_alpha) } diff --git a/kimchi/src/oracles.rs b/kimchi/src/oracles.rs index aaa91e186d..c36167ae0b 100644 --- a/kimchi/src/oracles.rs +++ b/kimchi/src/oracles.rs @@ -38,7 +38,7 @@ where #[cfg(feature = "ocaml_types")] pub mod caml { use ark_ff::PrimeField; - use poly_commitment::commitment::shift_scalar; + use poly_commitment::{commitment::shift_scalar, evaluation_proof::OpeningProof}; use crate::{ circuits::scalars::caml::CamlRandomOracles, curve::KimchiCurve, error::VerifyError, @@ -57,8 +57,8 @@ pub mod caml { pub fn create_caml_oracles( lgr_comm: Vec>, - index: VerifierIndex, - proof: ProverProof, + index: VerifierIndex>, + proof: ProverProof>, public_input: &[G::ScalarField], ) -> Result, VerifyError> where @@ -76,7 +76,7 @@ pub mod caml { let p_comm = PolyComm::::multi_scalar_mul(&lgr_comm_refs, &negated_public); let oracles_result = - proof.oracles::(&index, &p_comm, public_input)?; + proof.oracles::(&index, &p_comm, Some(public_input))?; let (mut sponge, combined_inner_product, public_evals, digest, oracles) = ( oracles_result.fq_sponge, diff --git a/kimchi/src/plonk_sponge.rs b/kimchi/src/plonk_sponge.rs index da54379f4d..8efefe8f64 100644 --- a/kimchi/src/plonk_sponge.rs +++ b/kimchi/src/plonk_sponge.rs @@ -5,7 +5,7 @@ use mina_poseidon::{ poseidon::{ArithmeticSponge, ArithmeticSpongeParams, Sponge}, }; -use crate::proof::{LookupEvaluations, PointEvaluations, ProofEvaluations}; +use crate::proof::{PointEvaluations, ProofEvaluations}; pub trait FrSponge { /// Creates a new Fr-Sponge. 
@@ -60,31 +60,96 @@ impl FrSponge for DefaultFrSponge { self.last_squeezed = vec![]; let ProofEvaluations { + public: _, // Must be absorbed first manually for now, to handle Mina annoyances w, z, s, coefficients, - lookup, generic_selector, poseidon_selector, + complete_add_selector, + mul_selector, + emul_selector, + endomul_scalar_selector, + range_check0_selector, + range_check1_selector, + foreign_field_add_selector, + foreign_field_mul_selector, + xor_selector, + rot_selector, + lookup_aggregation, + lookup_table, + lookup_sorted, + runtime_lookup_table, + runtime_lookup_table_selector, + xor_lookup_selector, + lookup_gate_lookup_selector, + range_check_lookup_selector, + foreign_field_mul_lookup_selector, } = e; - let mut points = vec![z, generic_selector, poseidon_selector]; + let mut points = vec![ + z, + generic_selector, + poseidon_selector, + complete_add_selector, + mul_selector, + emul_selector, + endomul_scalar_selector, + ]; w.iter().for_each(|w_i| points.push(w_i)); coefficients.iter().for_each(|c_i| points.push(c_i)); s.iter().for_each(|s_i| points.push(s_i)); - if let Some(l) = lookup.as_ref() { - let LookupEvaluations { - sorted, - aggreg, - table, - runtime, - } = l; - points.push(aggreg); - points.push(table); - sorted.iter().for_each(|s| points.push(s)); - runtime.iter().for_each(|x| points.push(x)); + // Optional gates + + if let Some(range_check0_selector) = range_check0_selector.as_ref() { + points.push(range_check0_selector) + } + if let Some(range_check1_selector) = range_check1_selector.as_ref() { + points.push(range_check1_selector) + } + if let Some(foreign_field_add_selector) = foreign_field_add_selector.as_ref() { + points.push(foreign_field_add_selector) + } + if let Some(foreign_field_mul_selector) = foreign_field_mul_selector.as_ref() { + points.push(foreign_field_mul_selector) + } + if let Some(xor_selector) = xor_selector.as_ref() { + points.push(xor_selector) + } + if let Some(rot_selector) = rot_selector.as_ref() { + points.push(rot_selector) + } + if let Some(lookup_aggregation) = lookup_aggregation.as_ref() { + points.push(lookup_aggregation) + } + if let Some(lookup_table) = lookup_table.as_ref() { + points.push(lookup_table) + } + for lookup_sorted in lookup_sorted { + if let Some(lookup_sorted) = lookup_sorted.as_ref() { + points.push(lookup_sorted) + } + } + if let Some(runtime_lookup_table) = runtime_lookup_table.as_ref() { + points.push(runtime_lookup_table) + } + if let Some(runtime_lookup_table_selector) = runtime_lookup_table_selector.as_ref() { + points.push(runtime_lookup_table_selector) + } + if let Some(xor_lookup_selector) = xor_lookup_selector.as_ref() { + points.push(xor_lookup_selector) + } + if let Some(lookup_gate_lookup_selector) = lookup_gate_lookup_selector.as_ref() { + points.push(lookup_gate_lookup_selector) + } + if let Some(range_check_lookup_selector) = range_check_lookup_selector.as_ref() { + points.push(range_check_lookup_selector) + } + if let Some(foreign_field_mul_lookup_selector) = foreign_field_mul_lookup_selector.as_ref() + { + points.push(foreign_field_mul_lookup_selector) } points.into_iter().for_each(|p| { diff --git a/kimchi/src/proof.rs b/kimchi/src/proof.rs index d335a5bfed..f7ace0c5fd 100644 --- a/kimchi/src/proof.rs +++ b/kimchi/src/proof.rs @@ -1,18 +1,16 @@ //! This module implements the data structures of a proof. 
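// The rewritten `absorb_evaluations` above fixes a single absorption order:
// mandatory evaluations first (z, the always-on selectors, then witnesses,
// coefficients and sigmas), then each optional evaluation only when present.
// The order is part of the Fiat-Shamir transcript, so prover and verifier
// must walk the fields identically. Simplified shape (toy sketch, not the
// crate's API; the real code absorbs PointEvaluations at zeta and zeta*omega):
fn absorb_in_order<F: Clone>(mandatory: &[F], optional: &[Option<F>]) -> Vec<F> {
    let mut points: Vec<F> = mandatory.to_vec();
    // disabled features contribute nothing; enabled ones append in field order
    points.extend(optional.iter().flatten().cloned());
    points
}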
use crate::circuits::{ - expr::Column, + berkeley_columns::Column, gate::GateType, + lookup::lookups::LookupPattern, wires::{COLUMNS, PERMUTS}, }; use ark_ec::AffineCurve; use ark_ff::{FftField, One, Zero}; use ark_poly::univariate::DensePolynomial; use o1_utils::ExtendedDensePolynomial; -use poly_commitment::{ - commitment::{b_poly, b_poly_coefficients, PolyComm}, - evaluation_proof::OpeningProof, -}; +use poly_commitment::commitment::{b_poly, b_poly_coefficients, PolyComm}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; use std::array; @@ -38,22 +36,6 @@ pub struct PointEvaluations { pub zeta_omega: Evals, } -/// Evaluations of lookup polynomials -#[serde_as] -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct LookupEvaluations { - /// sorted lookup table polynomial - pub sorted: Vec, - /// lookup aggregation polynomial - pub aggreg: Evals, - // TODO: May be possible to optimize this away? - /// lookup table polynomial - pub table: Evals, - - /// Optionally, a runtime table polynomial. - pub runtime: Option, -} - // TODO: this should really be vectors here, perhaps create another type for chunked evaluations? /// Polynomial evaluations contained in a `ProverProof`. /// - **Chunked evaluations** `Field` is instantiated with vectors with a length that equals the length of the chunk @@ -61,6 +43,8 @@ pub struct LookupEvaluations { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ProofEvaluations { + /// public input polynomials + pub public: Option, /// witness polynomials pub w: [Evals; COLUMNS], /// permutation polynomial @@ -70,12 +54,54 @@ pub struct ProofEvaluations { pub s: [Evals; PERMUTS - 1], /// coefficient polynomials pub coefficients: [Evals; COLUMNS], - /// lookup-related evaluations - pub lookup: Option>, /// evaluation of the generic selector polynomial pub generic_selector: Evals, /// evaluation of the poseidon selector polynomial pub poseidon_selector: Evals, + /// evaluation of the elliptic curve addition selector polynomial + pub complete_add_selector: Evals, + /// evaluation of the elliptic curve variable base scalar multiplication selector polynomial + pub mul_selector: Evals, + /// evaluation of the endoscalar multiplication selector polynomial + pub emul_selector: Evals, + /// evaluation of the endoscalar multiplication scalar computation selector polynomial + pub endomul_scalar_selector: Evals, + + // Optional gates + /// evaluation of the RangeCheck0 selector polynomial + pub range_check0_selector: Option, + /// evaluation of the RangeCheck1 selector polynomial + pub range_check1_selector: Option, + /// evaluation of the ForeignFieldAdd selector polynomial + pub foreign_field_add_selector: Option, + /// evaluation of the ForeignFieldMul selector polynomial + pub foreign_field_mul_selector: Option, + /// evaluation of the Xor selector polynomial + pub xor_selector: Option, + /// evaluation of the Rot selector polynomial + pub rot_selector: Option, + + // lookup-related evaluations + /// evaluation of lookup aggregation polynomial + pub lookup_aggregation: Option, + /// evaluation of lookup table polynomial + pub lookup_table: Option, + /// evaluation of lookup sorted polynomials + pub lookup_sorted: [Option; 5], + /// evaluation of runtime lookup table polynomial + pub runtime_lookup_table: Option, + + // lookup selectors + /// evaluation of the runtime lookup table selector polynomial + pub runtime_lookup_table_selector: Option, + /// evaluation of the Xor range check pattern selector polynomial + pub 
xor_lookup_selector: Option, + /// evaluation of the Lookup range check pattern selector polynomial + pub lookup_gate_lookup_selector: Option, + /// evaluation of the RangeCheck range check pattern selector polynomial + pub range_check_lookup_selector: Option, + /// evaluation of the ForeignFieldMul range check pattern selector polynomial + pub foreign_field_mul_lookup_selector: Option, } /// Commitments linked to the lookup feature @@ -110,12 +136,16 @@ pub struct ProverCommitments { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct ProverProof { +pub struct ProverProof { /// All the polynomial commitments required in the proof pub commitments: ProverCommitments, /// batched commitment opening proof - pub proof: OpeningProof, + #[serde(bound( + serialize = "OpeningProof: Serialize", + deserialize = "OpeningProof: Deserialize<'de>" + ))] + pub proof: OpeningProof, /// Two evaluations over a number of committed polynomials pub evals: ProofEvaluations>>, @@ -163,71 +193,97 @@ impl PointEvaluations { } } -impl LookupEvaluations { - pub fn map Eval2>(self, f: &FN) -> LookupEvaluations { - let LookupEvaluations { - sorted, - aggreg, - table, - runtime, - } = self; - LookupEvaluations { - sorted: sorted.into_iter().map(f).collect(), - aggreg: f(aggreg), - table: f(table), - runtime: runtime.map(f), - } - } - - pub fn map_ref Eval2>(&self, f: &FN) -> LookupEvaluations { - let LookupEvaluations { - sorted, - aggreg, - table, - runtime, - } = self; - LookupEvaluations { - sorted: sorted.iter().map(f).collect(), - aggreg: f(aggreg), - table: f(table), - runtime: runtime.as_ref().map(f), - } - } -} - impl ProofEvaluations { pub fn map Eval2>(self, f: &FN) -> ProofEvaluations { let ProofEvaluations { + public, w, z, s, coefficients, - lookup, generic_selector, poseidon_selector, + complete_add_selector, + mul_selector, + emul_selector, + endomul_scalar_selector, + range_check0_selector, + range_check1_selector, + foreign_field_add_selector, + foreign_field_mul_selector, + xor_selector, + rot_selector, + lookup_aggregation, + lookup_table, + lookup_sorted, + runtime_lookup_table, + runtime_lookup_table_selector, + xor_lookup_selector, + lookup_gate_lookup_selector, + range_check_lookup_selector, + foreign_field_mul_lookup_selector, } = self; ProofEvaluations { + public: public.map(f), w: w.map(f), z: f(z), s: s.map(f), coefficients: coefficients.map(f), - lookup: lookup.map(|x| LookupEvaluations::map(x, f)), generic_selector: f(generic_selector), poseidon_selector: f(poseidon_selector), + complete_add_selector: f(complete_add_selector), + mul_selector: f(mul_selector), + emul_selector: f(emul_selector), + endomul_scalar_selector: f(endomul_scalar_selector), + range_check0_selector: range_check0_selector.map(f), + range_check1_selector: range_check1_selector.map(f), + foreign_field_add_selector: foreign_field_add_selector.map(f), + foreign_field_mul_selector: foreign_field_mul_selector.map(f), + xor_selector: xor_selector.map(f), + rot_selector: rot_selector.map(f), + lookup_aggregation: lookup_aggregation.map(f), + lookup_table: lookup_table.map(f), + lookup_sorted: lookup_sorted.map(|x| x.map(f)), + runtime_lookup_table: runtime_lookup_table.map(f), + runtime_lookup_table_selector: runtime_lookup_table_selector.map(f), + xor_lookup_selector: xor_lookup_selector.map(f), + lookup_gate_lookup_selector: lookup_gate_lookup_selector.map(f), + range_check_lookup_selector: 
range_check_lookup_selector.map(f), + foreign_field_mul_lookup_selector: foreign_field_mul_lookup_selector.map(f), } } pub fn map_ref Eval2>(&self, f: &FN) -> ProofEvaluations { let ProofEvaluations { + public, w: [w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14], z, s: [s0, s1, s2, s3, s4, s5], coefficients: [c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14], - lookup, generic_selector, poseidon_selector, + complete_add_selector, + mul_selector, + emul_selector, + endomul_scalar_selector, + range_check0_selector, + range_check1_selector, + foreign_field_add_selector, + foreign_field_mul_selector, + xor_selector, + rot_selector, + lookup_aggregation, + lookup_table, + lookup_sorted, + runtime_lookup_table, + runtime_lookup_table_selector, + xor_lookup_selector, + lookup_gate_lookup_selector, + range_check_lookup_selector, + foreign_field_mul_lookup_selector, } = self; ProofEvaluations { + public: public.as_ref().map(f), w: [ f(w0), f(w1), @@ -264,54 +320,27 @@ impl ProofEvaluations { f(c13), f(c14), ], - lookup: lookup.as_ref().map(|l| l.map_ref(f)), generic_selector: f(generic_selector), poseidon_selector: f(poseidon_selector), - } - } -} - -impl ProofEvaluations { - /// Transpose the `ProofEvaluations`. - /// - /// # Panics - /// - /// Will panic if `ProofEvaluation` is None. - pub fn transpose( - evals: [&ProofEvaluations; N], - ) -> ProofEvaluations<[&F; N]> { - let has_lookup = evals.iter().all(|e| e.lookup.is_some()); - let has_runtime = has_lookup - && evals - .iter() - .all(|e| e.lookup.as_ref().unwrap().runtime.is_some()); - - ProofEvaluations { - generic_selector: array::from_fn(|i| &evals[i].generic_selector), - poseidon_selector: array::from_fn(|i| &evals[i].poseidon_selector), - z: array::from_fn(|i| &evals[i].z), - w: array::from_fn(|j| array::from_fn(|i| &evals[i].w[j])), - s: array::from_fn(|j| array::from_fn(|i| &evals[i].s[j])), - coefficients: array::from_fn(|j| array::from_fn(|i| &evals[i].coefficients[j])), - lookup: if has_lookup { - let sorted_length = evals[0].lookup.as_ref().unwrap().sorted.len(); - Some(LookupEvaluations { - aggreg: array::from_fn(|i| &evals[i].lookup.as_ref().unwrap().aggreg), - table: array::from_fn(|i| &evals[i].lookup.as_ref().unwrap().table), - sorted: (0..sorted_length) - .map(|j| array::from_fn(|i| &evals[i].lookup.as_ref().unwrap().sorted[j])) - .collect(), - runtime: if has_runtime { - Some(array::from_fn(|i| { - evals[i].lookup.as_ref().unwrap().runtime.as_ref().unwrap() - })) - } else { - None - }, - }) - } else { - None - }, + complete_add_selector: f(complete_add_selector), + mul_selector: f(mul_selector), + emul_selector: f(emul_selector), + endomul_scalar_selector: f(endomul_scalar_selector), + range_check0_selector: range_check0_selector.as_ref().map(f), + range_check1_selector: range_check1_selector.as_ref().map(f), + foreign_field_add_selector: foreign_field_add_selector.as_ref().map(f), + foreign_field_mul_selector: foreign_field_mul_selector.as_ref().map(f), + xor_selector: xor_selector.as_ref().map(f), + rot_selector: rot_selector.as_ref().map(f), + lookup_aggregation: lookup_aggregation.as_ref().map(f), + lookup_table: lookup_table.as_ref().map(f), + lookup_sorted: array::from_fn(|i| lookup_sorted[i].as_ref().map(f)), + runtime_lookup_table: runtime_lookup_table.as_ref().map(f), + runtime_lookup_table_selector: runtime_lookup_table_selector.as_ref().map(f), + xor_lookup_selector: xor_lookup_selector.as_ref().map(f), + lookup_gate_lookup_selector: lookup_gate_lookup_selector.as_ref().map(f), + 
range_check_lookup_selector: range_check_lookup_selector.as_ref().map(f), + foreign_field_mul_lookup_selector: foreign_field_mul_lookup_selector.as_ref().map(f), } } } @@ -373,13 +402,32 @@ impl ProofEvaluations> { zeta_omega: next, }; ProofEvaluations { + public: Some(pt(F::zero(), F::zero())), w: array::from_fn(|i| pt(curr[i], next[i])), z: pt(F::zero(), F::zero()), s: array::from_fn(|_| pt(F::zero(), F::zero())), coefficients: array::from_fn(|_| pt(F::zero(), F::zero())), - lookup: None, generic_selector: pt(F::zero(), F::zero()), poseidon_selector: pt(F::zero(), F::zero()), + complete_add_selector: pt(F::zero(), F::zero()), + mul_selector: pt(F::zero(), F::zero()), + emul_selector: pt(F::zero(), F::zero()), + endomul_scalar_selector: pt(F::zero(), F::zero()), + range_check0_selector: None, + range_check1_selector: None, + foreign_field_add_selector: None, + foreign_field_mul_selector: None, + xor_selector: None, + rot_selector: None, + lookup_aggregation: None, + lookup_table: None, + lookup_sorted: array::from_fn(|_| None), + runtime_lookup_table: None, + runtime_lookup_table_selector: None, + xor_lookup_selector: None, + lookup_gate_lookup_selector: None, + range_check_lookup_selector: None, + foreign_field_mul_lookup_selector: None, } } } @@ -398,14 +446,33 @@ impl ProofEvaluations { match col { Column::Witness(i) => Some(&self.w[i]), Column::Z => Some(&self.z), - Column::LookupSorted(i) => Some(&self.lookup.as_ref()?.sorted[i]), - Column::LookupAggreg => Some(&self.lookup.as_ref()?.aggreg), - Column::LookupTable => Some(&self.lookup.as_ref()?.table), - Column::LookupKindIndex(_) => None, - Column::LookupRuntimeSelector => None, - Column::LookupRuntimeTable => Some(self.lookup.as_ref()?.runtime.as_ref()?), + Column::LookupSorted(i) => self.lookup_sorted[i].as_ref(), + Column::LookupAggreg => self.lookup_aggregation.as_ref(), + Column::LookupTable => self.lookup_table.as_ref(), + Column::LookupKindIndex(LookupPattern::Xor) => self.xor_lookup_selector.as_ref(), + Column::LookupKindIndex(LookupPattern::Lookup) => { + self.lookup_gate_lookup_selector.as_ref() + } + Column::LookupKindIndex(LookupPattern::RangeCheck) => { + self.range_check_lookup_selector.as_ref() + } + Column::LookupKindIndex(LookupPattern::ForeignFieldMul) => { + self.foreign_field_mul_lookup_selector.as_ref() + } + Column::LookupRuntimeSelector => self.runtime_lookup_table_selector.as_ref(), + Column::LookupRuntimeTable => self.runtime_lookup_table.as_ref(), Column::Index(GateType::Generic) => Some(&self.generic_selector), Column::Index(GateType::Poseidon) => Some(&self.poseidon_selector), + Column::Index(GateType::CompleteAdd) => Some(&self.complete_add_selector), + Column::Index(GateType::VarBaseMul) => Some(&self.mul_selector), + Column::Index(GateType::EndoMul) => Some(&self.emul_selector), + Column::Index(GateType::EndoMulScalar) => Some(&self.endomul_scalar_selector), + Column::Index(GateType::RangeCheck0) => self.range_check0_selector.as_ref(), + Column::Index(GateType::RangeCheck1) => self.range_check1_selector.as_ref(), + Column::Index(GateType::ForeignFieldAdd) => self.foreign_field_add_selector.as_ref(), + Column::Index(GateType::ForeignFieldMul) => self.foreign_field_mul_selector.as_ref(), + Column::Index(GateType::Xor16) => self.xor_selector.as_ref(), + Column::Index(GateType::Rot64) => self.rot_selector.as_ref(), Column::Index(_) => None, Column::Coefficient(i) => Some(&self.coefficients[i]), Column::Permutation(i) => Some(&self.s[i]), @@ -463,59 +530,6 @@ pub mod caml { } } - // - // 
CamlLookupEvaluations - // - - #[derive(Clone, ocaml::IntoValue, ocaml::FromValue, ocaml_gen::Struct)] - pub struct CamlLookupEvaluations { - pub sorted: Vec>>, - pub aggreg: PointEvaluations>, - pub table: PointEvaluations>, - pub runtime: Option>>, - } - - impl From>>> for CamlLookupEvaluations - where - F: Clone, - CamlF: From, - { - fn from(le: LookupEvaluations>>) -> Self { - Self { - sorted: le - .sorted - .into_iter() - .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())) - .collect(), - aggreg: le.aggreg.map(&|x| x.into_iter().map(Into::into).collect()), - table: le.table.map(&|x| x.into_iter().map(Into::into).collect()), - runtime: le - .runtime - .map(|r| r.map(&|r| r.into_iter().map(Into::into).collect())), - } - } - } - - impl From> for LookupEvaluations>> - where - F: From + Clone, - { - fn from(pe: CamlLookupEvaluations) -> Self { - Self { - sorted: pe - .sorted - .into_iter() - .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())) - .collect(), - aggreg: pe.aggreg.map(&|x| x.into_iter().map(Into::into).collect()), - table: pe.table.map(&|x| x.into_iter().map(Into::into).collect()), - runtime: pe - .runtime - .map(|r| r.map(&|r| r.into_iter().map(Into::into).collect())), - } - } - } - // // CamlProofEvaluations // @@ -566,17 +580,41 @@ pub mod caml { PointEvaluations>, PointEvaluations>, ), - pub lookup: Option>, pub generic_selector: PointEvaluations>, pub poseidon_selector: PointEvaluations>, + pub complete_add_selector: PointEvaluations>, + pub mul_selector: PointEvaluations>, + pub emul_selector: PointEvaluations>, + pub endomul_scalar_selector: PointEvaluations>, + + pub range_check0_selector: Option>>, + pub range_check1_selector: Option>>, + pub foreign_field_add_selector: Option>>, + pub foreign_field_mul_selector: Option>>, + pub xor_selector: Option>>, + pub rot_selector: Option>>, + pub lookup_aggregation: Option>>, + pub lookup_table: Option>>, + pub lookup_sorted: Vec>>>, + pub runtime_lookup_table: Option>>, + + pub runtime_lookup_table_selector: Option>>, + pub xor_lookup_selector: Option>>, + pub lookup_gate_lookup_selector: Option>>, + pub range_check_lookup_selector: Option>>, + pub foreign_field_mul_lookup_selector: Option>>, } // // ProofEvaluations> <-> CamlProofEvaluations // - impl From>>> for CamlProofEvaluations + impl From>>> + for ( + Option>>, + CamlProofEvaluations, + ) where F: Clone, CamlF: From, @@ -697,28 +735,104 @@ pub mod caml { .map(&|x| x.into_iter().map(Into::into).collect()), ); - Self { - w, - coefficients, - z: pe.z.map(&|x| x.into_iter().map(Into::into).collect()), - s, - generic_selector: pe - .generic_selector - .map(&|x| x.into_iter().map(Into::into).collect()), - poseidon_selector: pe - .poseidon_selector - .map(&|x| x.into_iter().map(Into::into).collect()), - lookup: pe.lookup.map(Into::into), - } + ( + pe.public + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + CamlProofEvaluations { + w, + coefficients, + z: pe.z.map(&|x| x.into_iter().map(Into::into).collect()), + s, + generic_selector: pe + .generic_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + poseidon_selector: pe + .poseidon_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + complete_add_selector: pe + .complete_add_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + mul_selector: pe + .mul_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + emul_selector: pe + .emul_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + endomul_scalar_selector: pe + 
.endomul_scalar_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + range_check0_selector: pe + .range_check0_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + range_check1_selector: pe + .range_check1_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + foreign_field_add_selector: pe + .foreign_field_add_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + foreign_field_mul_selector: pe + .foreign_field_mul_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + xor_selector: pe + .xor_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + rot_selector: pe + .rot_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + lookup_aggregation: pe + .lookup_aggregation + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + lookup_table: pe + .lookup_table + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + lookup_sorted: pe + .lookup_sorted + .iter() + .map(|x| { + x.as_ref().map(|x| { + x.map_ref(&|x| x.clone().into_iter().map(Into::into).collect()) + }) + }) + .collect::>(), + runtime_lookup_table: pe + .runtime_lookup_table + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + runtime_lookup_table_selector: pe + .runtime_lookup_table_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + xor_lookup_selector: pe + .xor_lookup_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + lookup_gate_lookup_selector: pe + .lookup_gate_lookup_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + range_check_lookup_selector: pe + .range_check_lookup_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + foreign_field_mul_lookup_selector: pe + .foreign_field_mul_lookup_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + }, + ) } } - impl From> for ProofEvaluations>> + impl + From<( + Option>>, + CamlProofEvaluations, + )> for ProofEvaluations>> where F: Clone, + CamlF: Clone, F: From, { - fn from(cpe: CamlProofEvaluations) -> Self { + fn from( + (public, cpe): ( + Option>>, + CamlProofEvaluations, + ), + ) -> Self { let w = [ cpe.w.0.map(&|x| x.into_iter().map(Into::into).collect()), cpe.w.1.map(&|x| x.into_iter().map(Into::into).collect()), @@ -793,6 +907,7 @@ pub mod caml { ]; Self { + public: public.map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), w, coefficients, z: cpe.z.map(&|x| x.into_iter().map(Into::into).collect()), @@ -803,7 +918,68 @@ pub mod caml { poseidon_selector: cpe .poseidon_selector .map(&|x| x.into_iter().map(Into::into).collect()), - lookup: cpe.lookup.map(Into::into), + complete_add_selector: cpe + .complete_add_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + mul_selector: cpe + .mul_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + emul_selector: cpe + .emul_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + endomul_scalar_selector: cpe + .endomul_scalar_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + range_check0_selector: cpe + .range_check0_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + range_check1_selector: cpe + .range_check1_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + foreign_field_add_selector: cpe + .foreign_field_add_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + foreign_field_mul_selector: cpe + 
.foreign_field_mul_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + xor_selector: cpe + .xor_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + rot_selector: cpe + .rot_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + lookup_aggregation: cpe + .lookup_aggregation + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + lookup_table: cpe + .lookup_table + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + lookup_sorted: { + assert_eq!(cpe.lookup_sorted.len(), 5); // Invalid proof + array::from_fn(|i| { + cpe.lookup_sorted[i] + .as_ref() + .map(|x| x.clone().map(&|x| x.into_iter().map(Into::into).collect())) + }) + }, + runtime_lookup_table: cpe + .runtime_lookup_table + .map(|x| x.map(&|x| x.iter().map(|x| x.clone().into()).collect())), + runtime_lookup_table_selector: cpe + .runtime_lookup_table_selector + .map(|x| x.map(&|x| x.iter().map(|x| x.clone().into()).collect())), + xor_lookup_selector: cpe + .xor_lookup_selector + .map(|x| x.map(&|x| x.iter().map(|x| x.clone().into()).collect())), + lookup_gate_lookup_selector: cpe + .lookup_gate_lookup_selector + .map(|x| x.map(&|x| x.iter().map(|x| x.clone().into()).collect())), + range_check_lookup_selector: cpe + .range_check_lookup_selector + .map(|x| x.map(&|x| x.iter().map(|x| x.clone().into()).collect())), + foreign_field_mul_lookup_selector: cpe + .foreign_field_mul_lookup_selector + .map(|x| x.map(&|x| x.iter().map(|x| x.clone().into()).collect())), } } } diff --git a/kimchi/src/prover.rs b/kimchi/src/prover.rs index 9f6f8f701e..44c6571765 100644 --- a/kimchi/src/prover.rs +++ b/kimchi/src/prover.rs @@ -3,6 +3,7 @@ use crate::{ circuits::{ argument::{Argument, ArgumentType}, + constraints::zk_rows_strict_lower_bound, expr::{self, l0_1, Constants, Environment, LookupEnvironment}, gate::GateType, lookup::{self, runtime_tables::RuntimeTable, tables::combine_table_entry}, @@ -13,7 +14,6 @@ use crate::{ foreign_field_add::circuitgates::ForeignFieldAdd, foreign_field_mul::{self, circuitgates::ForeignFieldMul}, generic, permutation, - permutation::ZK_ROWS, poseidon::Poseidon, range_check::circuitgates::{RangeCheck0, RangeCheck1}, rot::Rot64, @@ -27,12 +27,12 @@ use crate::{ lagrange_basis_evaluations::LagrangeBasisEvaluations, plonk_sponge::FrSponge, proof::{ - LookupCommitments, LookupEvaluations, PointEvaluations, ProofEvaluations, - ProverCommitments, ProverProof, RecursionChallenge, + LookupCommitments, PointEvaluations, ProofEvaluations, ProverCommitments, ProverProof, + RecursionChallenge, }, prover_index::ProverIndex, + verifier_index::VerifierIndex, }; -use ark_ec::ProjectiveCurve; use ark_ff::{FftField, Field, One, PrimeField, UniformRand, Zero}; use ark_poly::{ univariate::DensePolynomial, EvaluationDomain, Evaluations, Polynomial, @@ -46,6 +46,7 @@ use poly_commitment::{ absorb_commitment, b_poly_coefficients, BlindedCommitment, CommitmentCurve, PolyComm, }, evaluation_proof::DensePolynomialOrEvaluations, + OpenProof, SRS as _, }; use rayon::prelude::*; use std::array; @@ -104,8 +105,15 @@ where aggreg_comm: Option>, aggreg8: Option>>, - /// The evaluations of the aggregation polynomial for the proof - eval: Option>>>, + // lookup-related evaluations + /// evaluation of lookup aggregation polynomial + pub lookup_aggregation_eval: Option>>, + /// evaluation of lookup table polynomial + pub lookup_table_eval: Option>>, + /// evaluation of lookup sorted polynomials + pub lookup_sorted_eval: [Option>>; 5], + /// evaluation of runtime 
lookup table polynomial + pub runtime_lookup_table_eval: Option>>, /// Runtime table runtime_table: Option>, @@ -114,7 +122,7 @@ where runtime_second_col_d8: Option>>, } -impl ProverProof +impl> ProverProof where G::BaseField: PrimeField, { @@ -130,8 +138,11 @@ where groupmap: &G::Map, witness: [Vec; COLUMNS], runtime_tables: &[RuntimeTable], - index: &ProverIndex, - ) -> Result { + index: &ProverIndex, + ) -> Result + where + VerifierIndex: Clone, + { Self::create_recursive::( groupmap, witness, @@ -158,20 +169,24 @@ where group_map: &G::Map, mut witness: [Vec; COLUMNS], runtime_tables: &[RuntimeTable], - index: &ProverIndex, + index: &ProverIndex, prev_challenges: Vec>, blinders: Option<[Option>; COLUMNS]>, - ) -> Result { + ) -> Result + where + VerifierIndex: Clone, + { internal_tracing::checkpoint!(internal_traces; create_recursive); - - // make sure that the SRS is not smaller than the domain size let d1_size = index.cs.domain.d1.size(); - if index.srs.max_degree() < d1_size { - return Err(ProverError::SRSTooSmall); - } let (_, endo_r) = G::endos(); + let num_chunks = if d1_size < index.max_poly_size { + 1 + } else { + d1_size / index.max_poly_size + }; + // TODO: rng should be passed as arg let rng = &mut rand::rngs::OsRng; @@ -185,19 +200,27 @@ where //~ 1. Ensure we have room in the witness for the zero-knowledge rows. //~ We currently expect the witness not to be of the same length as the domain, //~ but instead be of the length of the (smaller) circuit. - //~ If we cannot add `ZK_ROWS` rows to the columns of the witness before reaching + //~ If we cannot add `zk_rows` rows to the columns of the witness before reaching //~ the size of the domain, abort. let length_witness = witness[0].len(); let length_padding = d1_size .checked_sub(length_witness) .ok_or(ProverError::NoRoomForZkInWitness)?; - if length_padding < ZK_ROWS as usize { + let zero_knowledge_limit = zk_rows_strict_lower_bound(num_chunks); + if (index.cs.zk_rows as usize) < zero_knowledge_limit { + return Err(ProverError::NotZeroKnowledge( + zero_knowledge_limit, + index.cs.zk_rows as usize, + )); + } + + if length_padding < index.cs.zk_rows as usize { return Err(ProverError::NoRoomForZkInWitness); } //~ 1. Pad the witness columns with Zero gates to make them the same length as the domain. - //~ Then, randomize the last `ZK_ROWS` of each columns. + //~ Then, randomize the last `zk_rows` of each columns. internal_tracing::checkpoint!(internal_traces; pad_witness); for w in &mut witness { if w.len() != length_witness { @@ -208,14 +231,14 @@ where w.extend(std::iter::repeat(G::ScalarField::zero()).take(length_padding)); // zk-rows - for row in w.iter_mut().rev().take(ZK_ROWS as usize) { + for row in w.iter_mut().rev().take(index.cs.zk_rows as usize) { *row = ::rand(rng); } } //~ 1. Setup the Fq-Sponge. internal_tracing::checkpoint!(internal_traces; set_up_fq_sponge); - let mut fq_sponge = EFqSponge::new(G::OtherCurve::sponge_params()); + let mut fq_sponge = EFqSponge::new(G::other_curve_sponge_params()); //~ 1. Absorb the digest of the VerifierIndex. let verifier_index_digest = index.verifier_index_digest::(); @@ -237,7 +260,7 @@ where .interpolate(); //~ 1. Commit (non-hiding) to the negated public input polynomial. 
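// Restating two quantities fixed earlier in this hunk (standalone sketch,
// assuming kimchi's radix-2 domains where both sizes are powers of two).
// First, the chunk count: the circuit domain may now exceed the SRS, in which
// case every committed polynomial is split into `d1_size / max_poly_size`
// SRS-sized segments. Second, more chunks expose more evaluations per
// polynomial, so the number of randomized rows must clear
// `zk_rows_strict_lower_bound(num_chunks)` (formula not reproduced here),
// replacing the old fixed `ZK_ROWS` constant.
fn num_chunks(d1_size: usize, max_poly_size: usize) -> usize {
    if d1_size < max_poly_size {
        1 // the whole domain fits in the SRS: the old single-chunk path
    } else {
        d1_size / max_poly_size // exact division for powers of two
    }
}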
- let public_comm = index.srs.commit_non_hiding(&public_poly, None); + let public_comm = index.srs.commit_non_hiding(&public_poly, num_chunks, None); let public_comm = { index .srs @@ -351,7 +374,7 @@ where } // zero-knowledge - for e in evals.iter_mut().rev().take(ZK_ROWS as usize) { + for e in evals.iter_mut().rev().take(index.cs.zk_rows as usize) { *e = ::rand(rng); } @@ -367,7 +390,10 @@ where // commit the runtime polynomial // (and save it to the proof) - let runtime_table_comm = index.srs.commit(&runtime_table_contribution, None, rng); + let runtime_table_comm = + index + .srs + .commit(&runtime_table_contribution, num_chunks, None, rng); // absorb the commitment absorb_commitment(&mut fq_sponge, &runtime_table_comm.commitment); @@ -485,13 +511,21 @@ where joint_combiner, table_id_combiner, &lcs.configuration.lookup_info, + index.cs.zk_rows as usize, )?; //~~ * Randomize the last `EVALS` rows in each of the sorted polynomials //~~ in order to add zero-knowledge to the protocol. let sorted: Vec<_> = sorted .into_iter() - .map(|chunk| lookup::constraints::zk_patch(chunk, index.cs.domain.d1, rng)) + .map(|chunk| { + lookup::constraints::zk_patch( + chunk, + index.cs.domain.d1, + index.cs.zk_rows as usize, + rng, + ) + }) .collect(); //~~ * Commit each of the sorted polynomials. @@ -546,6 +580,7 @@ where lookup_context.sorted.as_ref().unwrap(), rng, &lcs.configuration.lookup_info, + index.cs.zk_rows as usize, )?; //~~ * Commit to the aggregation polynomial. @@ -572,7 +607,7 @@ where let z_poly = index.perm_aggreg(&witness, &beta, &gamma, rng)?; //~ 1. Commit (hidding) to the permutation aggregation polynomial $z$. - let z_comm = index.srs.commit(&z_poly, None, rng); + let z_comm = index.srs.commit(&z_poly, num_chunks, None, rng); //~ 1. Absorb the permutation aggregation polynomial $z$ with the Fq-Sponge. absorb_commitment(&mut fq_sponge, &z_comm.commitment); @@ -675,10 +710,14 @@ where joint_combiner: lookup_context.joint_combiner, endo_coefficient: index.cs.endo, mds, + zk_rows: index.cs.zk_rows, }, witness: &lagrange.d8.this.w, coefficient: &index.column_evaluations.coefficients8, - vanishes_on_last_4_rows: &index.cs.precomputations().vanishes_on_last_4_rows, + vanishes_on_zero_knowledge_and_previous_rows: &index + .cs + .precomputations() + .vanishes_on_zero_knowledge_and_previous_rows, z: &lagrange.d8.this.z, l0_1: l0_1(index.cs.domain.d1), domain: index.cs.domain, @@ -827,25 +866,7 @@ where }; //~ 1. commit (hiding) to the quotient polynomial $t$ - //~ TODO: specify the dummies - let t_comm = { - let mut t_comm = index.srs.commit("ient_poly, None, rng); - - let expected_t_size = PERMUTS; - let dummies = expected_t_size - t_comm.commitment.unshifted.len(); - // Add `dummies` many hiding commitments to the 0 polynomial, since if the - // number of commitments in `t_comm` is less than the max size, it means that - // the higher degree coefficients of `t` are 0. - for _ in 0..dummies { - let w = ::rand(rng); - t_comm - .commitment - .unshifted - .push(index.srs.h.mul(w).into_affine()); - t_comm.blinders.unshifted.push(w); - } - t_comm - }; + let t_comm = { index.srs.commit("ient_poly, 7 * num_chunks, None, rng) }; //~ 1. Absorb the the commitment of the quotient polynomial with the Fq-Sponge. 
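// On the quotient commitment above (sketch; the degree bound is the standard
// kimchi one, not spelled out in this diff): the highest-degree constraints
// have degree 8, and dividing by the degree-n vanishing polynomial leaves t
// with degree below 7n, i.e. 7 SRS-sized segments per witness chunk, hence
// `7 * num_chunks`. The deleted loop built the same shape by hand, padding
// with blinded commitments to zero up to PERMUTS (= 7) segments; passing the
// segment count to `commit` now performs that padding uniformly.
fn quotient_segments(num_chunks: usize) -> usize {
    7 * num_chunks // one segment per max_poly_size coefficients of t
}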
absorb_commitment(&mut fq_sponge, &t_comm.commitment); @@ -866,7 +887,7 @@ where .aggreg_coeffs .as_ref() .unwrap() - .to_chunked_polynomial(index.max_poly_size); + .to_chunked_polynomial(num_chunks, index.max_poly_size); //~~ * the sorted polynomials let sorted = lookup_context @@ -874,35 +895,41 @@ where .as_ref() .unwrap() .iter() - .map(|c| c.to_chunked_polynomial(index.max_poly_size)); + .map(|c| c.to_chunked_polynomial(num_chunks, index.max_poly_size)) + .collect::>(); //~~ * the table polynonial let joint_table = lookup_context.joint_lookup_table.as_ref().unwrap(); - let joint_table = joint_table.to_chunked_polynomial(index.max_poly_size); + let joint_table = joint_table.to_chunked_polynomial(num_chunks, index.max_poly_size); - lookup_context.eval = Some(LookupEvaluations { - aggreg: PointEvaluations { - zeta: aggreg.evaluate_chunks(zeta), - zeta_omega: aggreg.evaluate_chunks(zeta_omega), - }, - sorted: sorted - .map(|sorted| PointEvaluations { + lookup_context.lookup_aggregation_eval = Some(PointEvaluations { + zeta: aggreg.evaluate_chunks(zeta), + zeta_omega: aggreg.evaluate_chunks(zeta_omega), + }); + lookup_context.lookup_table_eval = Some(PointEvaluations { + zeta: joint_table.evaluate_chunks(zeta), + zeta_omega: joint_table.evaluate_chunks(zeta_omega), + }); + lookup_context.lookup_sorted_eval = array::from_fn(|i| { + if i < sorted.len() { + let sorted = &sorted[i]; + Some(PointEvaluations { zeta: sorted.evaluate_chunks(zeta), zeta_omega: sorted.evaluate_chunks(zeta_omega), }) - .collect(), - table: PointEvaluations { - zeta: joint_table.evaluate_chunks(zeta), - zeta_omega: joint_table.evaluate_chunks(zeta_omega), - }, - runtime: lookup_context.runtime_table.as_ref().map(|runtime_table| { - let runtime_table = runtime_table.to_chunked_polynomial(index.max_poly_size); + } else { + None + } + }); + lookup_context.runtime_lookup_table_eval = + lookup_context.runtime_table.as_ref().map(|runtime_table| { + let runtime_table = + runtime_table.to_chunked_polynomial(num_chunks, index.max_poly_size); PointEvaluations { zeta: runtime_table.evaluate_chunks(zeta), zeta_omega: runtime_table.evaluate_chunks(zeta_omega), } - }), - }) + }); } //~ 1. Chunk evaluate the following polynomials at both $\zeta$ and $\zeta \omega$: @@ -925,25 +952,33 @@ where //~ TODO: do we want to specify more on that? 
It seems unecessary except for the t polynomial (or if for some reason someone sets that to a low value) internal_tracing::checkpoint!(internal_traces; lagrange_basis_eval_zeta_poly); - let zeta_evals = LagrangeBasisEvaluations::new(index.cs.domain.d1, zeta); + let zeta_evals = + LagrangeBasisEvaluations::new(index.max_poly_size, index.cs.domain.d1, zeta); internal_tracing::checkpoint!(internal_traces; lagrange_basis_eval_zeta_omega_poly); - - let zeta_omega_evals = LagrangeBasisEvaluations::new(index.cs.domain.d1, zeta_omega); + let zeta_omega_evals = + LagrangeBasisEvaluations::new(index.max_poly_size, index.cs.domain.d1, zeta_omega); let chunked_evals_for_selector = |p: &Evaluations>| PointEvaluations { - zeta: vec![zeta_evals.evaluate_boolean(p)], - zeta_omega: vec![zeta_omega_evals.evaluate_boolean(p)], + zeta: zeta_evals.evaluate_boolean(p), + zeta_omega: zeta_omega_evals.evaluate_boolean(p), }; let chunked_evals_for_evaluations = |p: &Evaluations>| PointEvaluations { - zeta: vec![zeta_evals.evaluate(p)], - zeta_omega: vec![zeta_omega_evals.evaluate(p)], + zeta: zeta_evals.evaluate(p), + zeta_omega: zeta_omega_evals.evaluate(p), }; internal_tracing::checkpoint!(internal_traces; chunk_eval_zeta_omega_poly); let chunked_evals = ProofEvaluations::>> { + public: { + let chunked = public_poly.to_chunked_polynomial(num_chunks, index.max_poly_size); + Some(PointEvaluations { + zeta: chunked.evaluate_chunks(zeta), + zeta_omega: chunked.evaluate_chunks(zeta_omega), + }) + }, s: array::from_fn(|i| { chunked_evals_for_evaluations( &index.column_evaluations.permutation_coefficients8[i], @@ -953,7 +988,8 @@ where chunked_evals_for_evaluations(&index.column_evaluations.coefficients8[i]) }), w: array::from_fn(|i| { - let chunked = witness_poly[i].to_chunked_polynomial(index.max_poly_size); + let chunked = + witness_poly[i].to_chunked_polynomial(num_chunks, index.max_poly_size); PointEvaluations { zeta: chunked.evaluate_chunks(zeta), zeta_omega: chunked.evaluate_chunks(zeta_omega), @@ -961,20 +997,100 @@ where }), z: { - let chunked = z_poly.to_chunked_polynomial(index.max_poly_size); + let chunked = z_poly.to_chunked_polynomial(num_chunks, index.max_poly_size); PointEvaluations { zeta: chunked.evaluate_chunks(zeta), zeta_omega: chunked.evaluate_chunks(zeta_omega), } }, - lookup: lookup_context.eval.take(), + lookup_aggregation: lookup_context.lookup_aggregation_eval.take(), + lookup_table: lookup_context.lookup_table_eval.take(), + lookup_sorted: array::from_fn(|i| lookup_context.lookup_sorted_eval[i].take()), + runtime_lookup_table: lookup_context.runtime_lookup_table_eval.take(), generic_selector: chunked_evals_for_selector( &index.column_evaluations.generic_selector4, ), poseidon_selector: chunked_evals_for_selector( &index.column_evaluations.poseidon_selector8, ), + complete_add_selector: chunked_evals_for_selector( + &index.column_evaluations.complete_add_selector4, + ), + mul_selector: chunked_evals_for_selector(&index.column_evaluations.mul_selector8), + emul_selector: chunked_evals_for_selector(&index.column_evaluations.emul_selector8), + endomul_scalar_selector: chunked_evals_for_selector( + &index.column_evaluations.endomul_scalar_selector8, + ), + + range_check0_selector: index + .column_evaluations + .range_check0_selector8 + .as_ref() + .map(chunked_evals_for_selector), + range_check1_selector: index + .column_evaluations + .range_check1_selector8 + .as_ref() + .map(chunked_evals_for_selector), + foreign_field_add_selector: index + .column_evaluations + .foreign_field_add_selector8 + 
.as_ref() + .map(chunked_evals_for_selector), + foreign_field_mul_selector: index + .column_evaluations + .foreign_field_mul_selector8 + .as_ref() + .map(chunked_evals_for_selector), + xor_selector: index + .column_evaluations + .xor_selector8 + .as_ref() + .map(chunked_evals_for_selector), + rot_selector: index + .column_evaluations + .rot_selector8 + .as_ref() + .map(chunked_evals_for_selector), + + runtime_lookup_table_selector: index.cs.lookup_constraint_system.as_ref().and_then( + |lcs| { + lcs.runtime_selector + .as_ref() + .map(chunked_evals_for_selector) + }, + ), + xor_lookup_selector: index.cs.lookup_constraint_system.as_ref().and_then(|lcs| { + lcs.lookup_selectors + .xor + .as_ref() + .map(chunked_evals_for_selector) + }), + lookup_gate_lookup_selector: index.cs.lookup_constraint_system.as_ref().and_then( + |lcs| { + lcs.lookup_selectors + .lookup + .as_ref() + .map(chunked_evals_for_selector) + }, + ), + range_check_lookup_selector: index.cs.lookup_constraint_system.as_ref().and_then( + |lcs| { + lcs.lookup_selectors + .range_check + .as_ref() + .map(chunked_evals_for_selector) + }, + ), + foreign_field_mul_lookup_selector: index.cs.lookup_constraint_system.as_ref().and_then( + |lcs| { + lcs.lookup_selectors + .ffmul + .as_ref() + .map(chunked_evals_for_selector) + }, + ), }; let zeta_to_srs_len = zeta.pow([index.max_poly_size as u64]); @@ -1016,12 +1132,12 @@ where drop(env); // see https://o1-labs.github.io/mina-book/crypto/plonk/maller_15.html#the-prover-side - f.to_chunked_polynomial(index.max_poly_size) + f.to_chunked_polynomial(num_chunks, index.max_poly_size) .linearize(zeta_to_srs_len) }; let t_chunked = quotient_poly - .to_chunked_polynomial(index.max_poly_size) + .to_chunked_polynomial(7 * num_chunks, index.max_poly_size) .linearize(zeta_to_srs_len); &f_chunked - &t_chunked.scale(zeta_to_domain_size - G::ScalarField::one()) @@ -1077,16 +1193,6 @@ where }) .collect::>(); - //~ 1. Evaluate the negated public polynomial (if present) at $\zeta$ and $\zeta\omega$. - let public_evals = if public_poly.is_zero() { - [vec![G::ScalarField::zero()], vec![G::ScalarField::zero()]] - } else { - [ - vec![public_poly.evaluate(&zeta)], - vec![public_poly.evaluate(&zeta_omega)], - ] - }; - //~ 1. Absorb the unique evaluation of ft: $ft(\zeta\omega)$. fr_sponge.absorb(&ft_eval1); @@ -1097,8 +1203,8 @@ where //~~ * poseidon selector //~~ * the 15 register/witness //~~ * 6 sigmas evaluations (the last one is not evaluated) - fr_sponge.absorb_multiple(&public_evals[0]); - fr_sponge.absorb_multiple(&public_evals[1]); + fr_sponge.absorb_multiple(&chunked_evals.public.as_ref().unwrap().zeta); + fr_sponge.absorb_multiple(&chunked_evals.public.as_ref().unwrap().zeta_omega); fr_sponge.absorb_evaluations(&chunked_evals); //~ 1. 
Sample $v'$ with the Fr-Sponge @@ -1142,19 +1248,42 @@ where //~~ * the poseidon selector //~~ * the 15 registers/witness columns //~~ * the 6 sigmas - //~~ * optionally, the runtime table - polynomials.push((coefficients_form(&public_poly), None, fixed_hiding(1))); + polynomials.push(( + coefficients_form(&public_poly), + None, + fixed_hiding(num_chunks), + )); polynomials.push((coefficients_form(&ft), None, blinding_ft)); polynomials.push((coefficients_form(&z_poly), None, z_comm.blinders)); polynomials.push(( evaluations_form(&index.column_evaluations.generic_selector4), None, - fixed_hiding(1), + fixed_hiding(num_chunks), )); polynomials.push(( evaluations_form(&index.column_evaluations.poseidon_selector8), None, - fixed_hiding(1), + fixed_hiding(num_chunks), + )); + polynomials.push(( + evaluations_form(&index.column_evaluations.complete_add_selector4), + None, + fixed_hiding(num_chunks), + )); + polynomials.push(( + evaluations_form(&index.column_evaluations.mul_selector8), + None, + fixed_hiding(num_chunks), + )); + polynomials.push(( + evaluations_form(&index.column_evaluations.emul_selector8), + None, + fixed_hiding(num_chunks), + )); + polynomials.push(( + evaluations_form(&index.column_evaluations.endomul_scalar_selector8), + None, + fixed_hiding(num_chunks), )); polynomials.extend( witness_poly @@ -1168,16 +1297,73 @@ where .column_evaluations .coefficients8 .iter() - .map(|coefficientm| (evaluations_form(coefficientm), None, non_hiding(1))) + .map(|coefficientm| (evaluations_form(coefficientm), None, non_hiding(num_chunks))) .collect::>(), ); polynomials.extend( index.column_evaluations.permutation_coefficients8[0..PERMUTS - 1] .iter() - .map(|w| (evaluations_form(w), None, non_hiding(1))) + .map(|w| (evaluations_form(w), None, non_hiding(num_chunks))) .collect::>(), ); + //~~ * the optional gates + if let Some(range_check0_selector8) = + index.column_evaluations.range_check0_selector8.as_ref() + { + polynomials.push(( + evaluations_form(range_check0_selector8), + None, + non_hiding(num_chunks), + )); + } + if let Some(range_check1_selector8) = + index.column_evaluations.range_check1_selector8.as_ref() + { + polynomials.push(( + evaluations_form(range_check1_selector8), + None, + non_hiding(num_chunks), + )); + } + if let Some(foreign_field_add_selector8) = index + .column_evaluations + .foreign_field_add_selector8 + .as_ref() + { + polynomials.push(( + evaluations_form(foreign_field_add_selector8), + None, + non_hiding(num_chunks), + )); + } + if let Some(foreign_field_mul_selector8) = index + .column_evaluations + .foreign_field_mul_selector8 + .as_ref() + { + polynomials.push(( + evaluations_form(foreign_field_mul_selector8), + None, + non_hiding(num_chunks), + )); + } + if let Some(xor_selector8) = index.column_evaluations.xor_selector8.as_ref() { + polynomials.push(( + evaluations_form(xor_selector8), + None, + non_hiding(num_chunks), + )); + } + if let Some(rot_selector8) = index.column_evaluations.rot_selector8.as_ref() { + polynomials.push(( + evaluations_form(rot_selector8), + None, + non_hiding(num_chunks), + )); + } + + //~~ * optionally, the runtime table //~ 1. 
if using lookup: if let Some(lcs) = &index.cs.lookup_constraint_system { //~~ * add the lookup sorted polynomials @@ -1202,14 +1388,19 @@ where let runtime_comm = lookup_context.runtime_table_comm.as_ref().unwrap(); let joint_combiner = lookup_context.joint_combiner.as_ref().unwrap(); - let blinding = runtime_comm.blinders.unshifted[0]; + let unshifted = runtime_comm + .blinders + .unshifted + .iter() + .map(|blinding| *joint_combiner * blinding) + .collect(); PolyComm { - unshifted: vec![*joint_combiner * blinding], + unshifted, shifted: None, } } else { - non_hiding(1) + non_hiding(num_chunks) }; let joint_lookup_table = lookup_context.joint_lookup_table.as_ref().unwrap(); @@ -1227,11 +1418,42 @@ where runtime_table_comm.blinders.clone(), )); } + + //~~ * the lookup selectors + + if let Some(runtime_lookup_table_selector) = lcs.runtime_selector.as_ref() { + polynomials.push(( + evaluations_form(runtime_lookup_table_selector), + None, + non_hiding(1), + )) + } + if let Some(xor_lookup_selector) = lcs.lookup_selectors.xor.as_ref() { + polynomials.push((evaluations_form(xor_lookup_selector), None, non_hiding(1))) + } + if let Some(lookup_gate_selector) = lcs.lookup_selectors.lookup.as_ref() { + polynomials.push((evaluations_form(lookup_gate_selector), None, non_hiding(1))) + } + if let Some(range_check_lookup_selector) = lcs.lookup_selectors.range_check.as_ref() { + polynomials.push(( + evaluations_form(range_check_lookup_selector), + None, + non_hiding(1), + )) + } + if let Some(foreign_field_mul_lookup_selector) = lcs.lookup_selectors.ffmul.as_ref() { + polynomials.push(( + evaluations_form(foreign_field_mul_lookup_selector), + None, + non_hiding(1), + )) + } } //~ 1. Create an aggregated evaluation proof for all of these polynomials at $\zeta$ and $\zeta\omega$ using $u$ and $v$. 
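// The runtime-table blinding above changes from scaling the single blinder
// `blinders.unshifted[0]` to scaling all of them: a chunked commitment
// carries one blinder per segment, and the runtime contribution enters the
// combined lookup table multiplied by the joint combiner, so every segment's
// blinder must be scaled the same way. Standalone restatement:
fn scale_runtime_blinders<F: ark_ff::Field>(unshifted: &[F], joint_combiner: F) -> Vec<F> {
    unshifted.iter().map(|b| joint_combiner * *b).collect()
}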
internal_tracing::checkpoint!(internal_traces; create_aggregated_evaluation_proof); - let proof = index.srs.open( + let proof = OpenProof::open( + &*index.srs, group_map, &polynomials, &[zeta, zeta_omega], @@ -1294,11 +1516,20 @@ pub mod caml { use super::*; use crate::proof::caml::{CamlProofEvaluations, CamlRecursionChallenge}; use ark_ec::AffineCurve; - use poly_commitment::commitment::caml::{CamlOpeningProof, CamlPolyComm}; + use poly_commitment::{ + commitment::caml::{CamlOpeningProof, CamlPolyComm}, + evaluation_proof::OpeningProof, + }; #[cfg(feature = "internal_tracing")] pub use internal_traces::caml::CamlTraces as CamlProverTraces; + #[derive(ocaml::IntoValue, ocaml::FromValue, ocaml_gen::Struct)] + pub struct CamlProofWithPublic { + pub public_evals: Option>>, + pub proof: CamlProverProof, + } + // // CamlProverProof // @@ -1496,37 +1727,50 @@ pub mod caml { } // - // ProverProof <-> CamlProverProof + // ProverProof <-> CamlProofWithPublic // - impl From<(ProverProof, Vec)> for CamlProverProof + impl From<(ProverProof>, Vec)> + for CamlProofWithPublic where G: AffineCurve, CamlG: From, CamlF: From, { - fn from(pp: (ProverProof, Vec)) -> Self { - Self { - commitments: pp.0.commitments.into(), - proof: pp.0.proof.into(), - evals: pp.0.evals.into(), - ft_eval1: pp.0.ft_eval1.into(), - public: pp.1.into_iter().map(Into::into).collect(), - prev_challenges: pp.0.prev_challenges.into_iter().map(Into::into).collect(), + fn from(pp: (ProverProof>, Vec)) -> Self { + let (public_evals, evals) = pp.0.evals.into(); + CamlProofWithPublic { + public_evals, + proof: CamlProverProof { + commitments: pp.0.commitments.into(), + proof: pp.0.proof.into(), + evals, + ft_eval1: pp.0.ft_eval1.into(), + public: pp.1.into_iter().map(Into::into).collect(), + prev_challenges: pp.0.prev_challenges.into_iter().map(Into::into).collect(), + }, } } } - impl From> for (ProverProof, Vec) + impl From> + for (ProverProof>, Vec) where + CamlF: Clone, G: AffineCurve + From, G::ScalarField: From, { - fn from(caml_pp: CamlProverProof) -> (ProverProof, Vec) { + fn from( + caml_pp: CamlProofWithPublic, + ) -> (ProverProof>, Vec) { + let CamlProofWithPublic { + public_evals, + proof: caml_pp, + } = caml_pp; let proof = ProverProof { commitments: caml_pp.commitments.into(), proof: caml_pp.proof.into(), - evals: caml_pp.evals.into(), + evals: (public_evals, caml_pp.evals).into(), ft_eval1: caml_pp.ft_eval1.into(), prev_challenges: caml_pp .prev_challenges diff --git a/kimchi/src/prover_index.rs b/kimchi/src/prover_index.rs index 8140703408..05db4f5b6b 100644 --- a/kimchi/src/prover_index.rs +++ b/kimchi/src/prover_index.rs @@ -3,6 +3,7 @@ use crate::{ alphas::Alphas, circuits::{ + berkeley_columns::Column, constraints::{ColumnEvaluations, ConstraintSystem}, expr::{Linearization, PolishToken}, }, @@ -10,9 +11,9 @@ use crate::{ linearization::expr_linearization, verifier_index::VerifierIndex, }; -use ark_poly::EvaluationDomain; +use ark_ff::PrimeField; use mina_poseidon::FqSponge; -use poly_commitment::srs::SRS; +use poly_commitment::{OpenProof, SRS as _}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_with::serde_as; use std::sync::Arc; @@ -21,14 +22,14 @@ use std::sync::Arc; #[serde_as] #[derive(Serialize, Deserialize, Debug, Clone)] //~spec:startcode -pub struct ProverIndex { +pub struct ProverIndex> { /// constraints system polynomials #[serde(bound = "ConstraintSystem: Serialize + DeserializeOwned")] pub cs: ConstraintSystem, /// The symbolic linearization of our circuit, which can compile to 
concrete types once certain values are learned in the protocol. #[serde(skip)] - pub linearization: Linearization>>, + pub linearization: Linearization>, Column>, /// The mapping between powers of alpha and constraints #[serde(skip)] @@ -36,7 +37,8 @@ pub struct ProverIndex { /// polynomial commitment keys #[serde(skip)] - pub srs: Arc>, + #[serde(bound(deserialize = "OpeningProof::SRS: Default"))] + pub srs: Arc, /// maximal size of polynomial section pub max_poly_size: usize, @@ -46,7 +48,7 @@ pub struct ProverIndex { /// The verifier index corresponding to this prover index #[serde(skip)] - pub verifier_index: Option>, + pub verifier_index: Option>, /// The verifier index digest corresponding to this prover index #[serde_as(as = "Option")] @@ -54,24 +56,17 @@ pub struct ProverIndex { } //~spec:endcode -impl ProverIndex { +impl> ProverIndex +where + G::BaseField: PrimeField, +{ /// this function compiles the index from constraints - /// - /// # Panics - /// - /// Will panic if `polynomial segment size` is bigger than `circuit`. pub fn create( mut cs: ConstraintSystem, endo_q: G::ScalarField, - srs: Arc>, + srs: Arc, ) -> Self { - let max_poly_size = srs.g.len(); - if cs.public > 0 { - assert!( - max_poly_size >= cs.domain.d1.size(), - "polynomial segment size has to be not smaller than that of the circuit!" - ); - } + let max_poly_size = srs.max_poly_size(); cs.endo = endo_q; // pre-compute the linearization @@ -99,7 +94,10 @@ impl ProverIndex { EFqSponge: Clone + FqSponge, >( &mut self, - ) -> G::BaseField { + ) -> G::BaseField + where + VerifierIndex: Clone, + { if let Some(verifier_index_digest) = self.verifier_index_digest { return verifier_index_digest; } @@ -116,7 +114,10 @@ impl ProverIndex { /// Retrieve or compute the digest for the corresponding verifier index. pub fn verifier_index_digest>( &self, - ) -> G::BaseField { + ) -> G::BaseField + where + VerifierIndex: Clone, + { if let Some(verifier_index_digest) = self.verifier_index_digest { return verifier_index_digest; } @@ -141,21 +142,24 @@ pub mod testing { precomputed_srs, }; use ark_ff::{PrimeField, SquareRootField}; - use poly_commitment::srs::endos; - - /// Create new index for lookups. - /// - /// # Panics - /// - /// Will panic if `constraint system` is not built with `gates` input. 
- pub fn new_index_for_test_with_lookups( + use ark_poly::{EvaluationDomain, Radix2EvaluationDomain as D}; + use poly_commitment::{evaluation_proof::OpeningProof, srs::SRS, OpenProof}; + + #[allow(clippy::too_many_arguments)] + pub fn new_index_for_test_with_lookups_and_custom_srs< + G: KimchiCurve, + OpeningProof: OpenProof, + F: FnMut(D, usize) -> OpeningProof::SRS, + >( gates: Vec>, public: usize, prev_challenges: usize, lookup_tables: Vec>, runtime_tables: Option>>, disable_gates_checks: bool, - ) -> ProverIndex + override_srs_size: Option, + mut get_srs: F, + ) -> ProverIndex where G::BaseField: PrimeField, G::ScalarField: PrimeField + SquareRootField, @@ -167,32 +171,68 @@ pub mod testing { .public(public) .prev_challenges(prev_challenges) .disable_gates_checks(disable_gates_checks) + .max_poly_size(override_srs_size) .build() .unwrap(); - let mut srs = if cs.domain.d1.log_size_of_group <= precomputed_srs::SERIALIZED_SRS_SIZE { - // TODO: we should trim it if it's smaller - precomputed_srs::get_srs() - } else { - // TODO: we should resume the SRS generation starting from the serialized one - SRS::::create(cs.domain.d1.size()) - }; - - srs.add_lagrange_basis(cs.domain.d1); + let srs_size = override_srs_size.unwrap_or_else(|| cs.domain.d1.size()); + let srs = get_srs(cs.domain.d1, srs_size); let srs = Arc::new(srs); - let (endo_q, _endo_r) = endos::(); - ProverIndex::::create(cs, endo_q, srs) + let &endo_q = G::other_curve_endo(); + ProverIndex::create(cs, endo_q, srs) + } + + /// Create new index for lookups. + /// + /// # Panics + /// + /// Will panic if `constraint system` is not built with `gates` input. + pub fn new_index_for_test_with_lookups( + gates: Vec>, + public: usize, + prev_challenges: usize, + lookup_tables: Vec>, + runtime_tables: Option>>, + disable_gates_checks: bool, + override_srs_size: Option, + ) -> ProverIndex> + where + G::BaseField: PrimeField, + G::ScalarField: PrimeField + SquareRootField, + { + new_index_for_test_with_lookups_and_custom_srs( + gates, + public, + prev_challenges, + lookup_tables, + runtime_tables, + disable_gates_checks, + override_srs_size, + |d1: D, size: usize| { + let log2_size = size.ilog2(); + let mut srs = if log2_size <= precomputed_srs::SERIALIZED_SRS_SIZE { + // TODO: we should trim it if it's smaller + precomputed_srs::get_srs() + } else { + // TODO: we should resume the SRS generation starting from the serialized one + SRS::::create(size) + }; + + srs.add_lagrange_basis(d1); + srs + }, + ) } pub fn new_index_for_test( gates: Vec>, public: usize, - ) -> ProverIndex + ) -> ProverIndex> where G::BaseField: PrimeField, G::ScalarField: PrimeField + SquareRootField, { - new_index_for_test_with_lookups::(gates, public, 0, vec![], None, false) + new_index_for_test_with_lookups::(gates, public, 0, vec![], None, false, None) } } diff --git a/kimchi/src/snarky/constants.rs b/kimchi/src/snarky/constants.rs index bf11fce2a8..2324d4ff87 100644 --- a/kimchi/src/snarky/constants.rs +++ b/kimchi/src/snarky/constants.rs @@ -1,9 +1,7 @@ //! Constants used for poseidon. 
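The new `new_index_for_test_with_lookups_and_custom_srs` helper threads a `get_srs` closure through index creation, so a test can decide both the SRS flavour and its size. A hypothetical call site (gate construction elided; the closure mirrors the IPA one that `new_index_for_test_with_lookups` passes above):

```rust
use ark_poly::Radix2EvaluationDomain as D;
use kimchi::{
    circuits::gate::CircuitGate,
    prover_index::{testing::new_index_for_test_with_lookups_and_custom_srs, ProverIndex},
};
use mina_curves::pasta::{Fp, Vesta};
use poly_commitment::{evaluation_proof::OpeningProof, srs::SRS};

fn chunked_test_index(gates: Vec<CircuitGate<Fp>>) -> ProverIndex<Vesta, OpeningProof<Vesta>> {
    new_index_for_test_with_lookups_and_custom_srs(
        gates,
        0,             // public inputs
        0,             // prev_challenges
        vec![],        // lookup tables
        None,          // runtime tables
        false,         // disable_gates_checks
        Some(1 << 16), // cap the SRS at 2^16 so larger circuits get chunked
        |d1: D<Fp>, size: usize| {
            // Build a fresh IPA SRS of the requested size, with the Lagrange
            // basis precomputed for the circuit's d1 domain.
            let mut srs = SRS::<Vesta>::create(size);
            srs.add_lagrange_basis(d1);
            srs
        },
    )
}
```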
-use ark_ec::AffineCurve; use ark_ff::Field; use mina_poseidon::poseidon::ArithmeticSpongeParams; -use poly_commitment::commitment::CommitmentCurve; use crate::curve::KimchiCurve; @@ -20,10 +18,8 @@ where { pub fn new>() -> Self { let poseidon = Curve::sponge_params().clone(); - let (endo_q, _endo_r) = Curve::OtherCurve::endos(); - let base = Curve::OtherCurve::prime_subgroup_generator() - .to_coordinates() - .unwrap(); + let endo_q = Curve::other_curve_endo(); + let base = Curve::other_curve_prime_subgroup_generator(); Self { poseidon, diff --git a/kimchi/src/tests/chunked.rs b/kimchi/src/tests/chunked.rs new file mode 100644 index 0000000000..7e00d715aa --- /dev/null +++ b/kimchi/src/tests/chunked.rs @@ -0,0 +1,115 @@ +use super::framework::TestFramework; +use crate::circuits::polynomials::generic::GenericGateSpec; +use crate::circuits::{ + gate::CircuitGate, + wires::{Wire, COLUMNS}, +}; +use ark_ff::{UniformRand, Zero}; +use itertools::iterate; +use mina_curves::pasta::{Fp, Vesta, VestaParameters}; +use mina_poseidon::{ + constants::PlonkSpongeConstantsKimchi, + sponge::{DefaultFqSponge, DefaultFrSponge}, +}; +use std::array; + +type SpongeParams = PlonkSpongeConstantsKimchi; +type BaseSponge = DefaultFqSponge; +type ScalarSponge = DefaultFrSponge; + +fn test_generic_gate_with_srs_override( + circuit_size_log_2: usize, + override_srs_size: Option, +) { + let public = vec![Fp::from(1u8); 5]; + let circuit_size = (1 << circuit_size_log_2) - 15; + + let mut gates_row = iterate(0, |&i| i + 1); + let mut gates = Vec::with_capacity(circuit_size); + let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![Fp::zero(); circuit_size]); + + let rng = &mut rand::rngs::OsRng; + + // public input + for p in public.iter() { + let r = gates_row.next().unwrap(); + witness[0][r] = *p; + gates.push(CircuitGate::create_generic_gadget( + Wire::for_row(r), + GenericGateSpec::Pub, + None, + )); + } + + for _ in public.len()..circuit_size { + let r = gates_row.next().unwrap(); + + // First gate + let g1 = GenericGateSpec::Add { + left_coeff: None, + right_coeff: Some(3u32.into()), + output_coeff: None, + }; + let g1_l = ::rand(rng); + let g1_r = ::rand(rng); + let g1_o = g1_l + g1_r * Fp::from(3u32); + witness[0][r] = g1_l; + witness[1][r] = g1_r; + witness[2][r] = g1_o; + + // Second gate + let g2 = GenericGateSpec::Mul { + output_coeff: None, + mul_coeff: Some(2u32.into()), + }; + let g2_l = ::rand(rng); + let g2_r = ::rand(rng); + let g2_o = g2_l * g2_r * Fp::from(2u32); + witness[3][r] = g2_l; + witness[4][r] = g2_r; + witness[5][r] = g2_o; + gates.push(CircuitGate::create_generic_gadget( + Wire::for_row(r), + g1, + Some(g2), + )); + } + + // create and verify proof based on the witness + let framework = TestFramework::::default() + .gates(gates) + .witness(witness) + .public_inputs(public); + let framework = if let Some(srs_size) = override_srs_size { + framework.override_srs_size(srs_size) + } else { + framework + }; + framework + .setup() + .prove_and_verify::() + .unwrap(); +} + +// Disabled, too slow +/*#[test] +fn test_2_to_20_chunked_generic_gate_pub() { + test_generic_gate_with_srs_override(20, Some(1 << 16)) +}*/ + +// Disabled, too slow +/*#[test] +fn test_2_to_18_chunked_generic_gate_pub() { + test_generic_gate_with_srs_override(18, Some(1 << 16)) +}*/ + +#[test] +fn test_2_to_17_chunked_generic_gate_pub() { + test_generic_gate_with_srs_override(17, Some(1 << 16)) +} + +// Disabled; redundant, just for comparison +/*#[test] +fn test_2_to_16_unchunked_generic_gate_pub() { + 
test_generic_gate_with_srs_override(16, None) +}*/ diff --git a/kimchi/src/tests/foreign_field_add.rs b/kimchi/src/tests/foreign_field_add.rs index 069d3b1bcf..24c1d5a8c1 100644 --- a/kimchi/src/tests/foreign_field_add.rs +++ b/kimchi/src/tests/foreign_field_add.rs @@ -27,7 +27,10 @@ use o1_utils::{ foreign_field::{BigUintForeignFieldHelpers, ForeignElement, HI, LO, MI, TWO_TO_LIMB}, FieldHelpers, Two, }; -use poly_commitment::srs::{endos, SRS}; +use poly_commitment::{ + evaluation_proof::OpeningProof, + srs::{endos, SRS}, +}; use rand::{rngs::StdRng, Rng, SeedableRng}; use std::array; use std::sync::Arc; @@ -313,7 +316,7 @@ fn create_test_constraint_system_ffadd( opcodes: &[FFOps], foreign_field_modulus: BigUint, full: bool, -) -> ProverIndex { +) -> ProverIndex> { let (_next_row, gates) = if full { full_circuit(opcodes, &foreign_field_modulus) } else { @@ -326,7 +329,7 @@ fn create_test_constraint_system_ffadd( let srs = Arc::new(srs); let (endo_q, _endo_r) = endos::(); - ProverIndex::::create(cs, endo_q, srs) + ProverIndex::>::create(cs, endo_q, srs) } // helper to reduce lines of code in repetitive test structure @@ -335,7 +338,10 @@ fn test_ffadd( inputs: Vec, opcodes: &[FFOps], full: bool, -) -> ([Vec; COLUMNS], ProverIndex) { +) -> ( + [Vec; COLUMNS], + ProverIndex>, +) { let index = create_test_constraint_system_ffadd(opcodes, foreign_field_modulus.clone(), full); let witness = if full { @@ -1493,7 +1499,7 @@ fn test_ffadd_finalization() { let srs = Arc::new(srs); let (endo_q, _endo_r) = endos::(); - ProverIndex::::create(cs, endo_q, srs) + ProverIndex::>::create(cs, endo_q, srs) }; for row in 0..witness[0].len() { diff --git a/kimchi/src/tests/framework.rs b/kimchi/src/tests/framework.rs index 158988b94b..b0b79dbdbd 100644 --- a/kimchi/src/tests/framework.rs +++ b/kimchi/src/tests/framework.rs @@ -12,21 +12,34 @@ use crate::{ curve::KimchiCurve, plonk_sponge::FrSponge, proof::{ProverProof, RecursionChallenge}, - prover_index::{testing::new_index_for_test_with_lookups, ProverIndex}, + prover_index::{ + testing::{ + new_index_for_test_with_lookups, new_index_for_test_with_lookups_and_custom_srs, + }, + ProverIndex, + }, verifier::verify, verifier_index::VerifierIndex, }; use ark_ff::PrimeField; +use ark_poly::Radix2EvaluationDomain as D; use groupmap::GroupMap; use mina_poseidon::sponge::FqSponge; use num_bigint::BigUint; -use poly_commitment::commitment::CommitmentCurve; +use poly_commitment::{ + commitment::CommitmentCurve, evaluation_proof::OpeningProof as DlogOpeningProof, OpenProof, +}; use std::{fmt::Write, time::Instant}; // aliases #[derive(Default, Clone)] -pub(crate) struct TestFramework { +pub(crate) struct TestFramework = DlogOpeningProof> +where + G::BaseField: PrimeField, + OpeningProof::SRS: Clone, + VerifierIndex: Clone, +{ gates: Option>>, witness: Option<[Vec; COLUMNS]>, public_inputs: Vec, @@ -36,18 +49,26 @@ pub(crate) struct TestFramework { recursion: Vec>, num_prev_challenges: usize, disable_gates_checks: bool, + override_srs_size: Option, - prover_index: Option>, - verifier_index: Option>, + prover_index: Option>, + verifier_index: Option>, } #[derive(Clone)] -pub(crate) struct TestRunner(TestFramework); +pub(crate) struct TestRunner = DlogOpeningProof>( + TestFramework, +) +where + G::BaseField: PrimeField, + OpeningProof::SRS: Clone, + VerifierIndex: Clone; -impl TestFramework +impl> TestFramework where G::BaseField: PrimeField, - G::ScalarField: PrimeField, + OpeningProof::SRS: Clone, + VerifierIndex: Clone, { #[must_use] pub(crate) fn gates(mut self, 
gates: Vec>) -> Self { @@ -94,6 +115,49 @@ where self } + #[must_use] + pub(crate) fn override_srs_size(mut self, size: usize) -> Self { + self.override_srs_size = Some(size); + self + } + + /// creates the indexes + #[must_use] + pub(crate) fn setup_with_custom_srs, usize) -> OpeningProof::SRS>( + mut self, + get_srs: F, + ) -> TestRunner { + let start = Instant::now(); + + let lookup_tables = std::mem::take(&mut self.lookup_tables); + let runtime_tables_setup = self.runtime_tables_setup.take(); + + let index = new_index_for_test_with_lookups_and_custom_srs( + self.gates.take().unwrap(), + self.public_inputs.len(), + self.num_prev_challenges, + lookup_tables, + runtime_tables_setup, + self.disable_gates_checks, + self.override_srs_size, + get_srs, + ); + println!( + "- time to create prover index: {:?}s", + start.elapsed().as_secs() + ); + + self.verifier_index = Some(index.verifier_index()); + self.prover_index = Some(index); + + TestRunner(self) + } +} + +impl TestFramework +where + G::BaseField: PrimeField, +{ /// creates the indexes #[must_use] pub(crate) fn setup(mut self) -> TestRunner { @@ -109,6 +173,7 @@ where lookup_tables, runtime_tables_setup, self.disable_gates_checks, + self.override_srs_size, ); println!( "- time to create prover index: {:?}s", @@ -122,10 +187,12 @@ where } } -impl TestRunner +impl> TestRunner where G::ScalarField: PrimeField + Clone, G::BaseField: PrimeField + Clone, + OpeningProof::SRS: Clone, + VerifierIndex: Clone, { #[must_use] pub(crate) fn runtime_tables( @@ -148,7 +215,7 @@ where self } - pub(crate) fn prover_index(&self) -> &ProverIndex { + pub(crate) fn prover_index(&self) -> &ProverIndex { self.0.prover_index.as_ref().unwrap() } @@ -219,7 +286,7 @@ where // verify the proof (propagate any errors) let start = Instant::now(); - verify::( + verify::( &group_map, &self.0.verifier_index.unwrap(), &proof, diff --git a/kimchi/src/tests/generic.rs b/kimchi/src/tests/generic.rs index 3f8cbc98b6..f9efc83341 100644 --- a/kimchi/src/tests/generic.rs +++ b/kimchi/src/tests/generic.rs @@ -86,3 +86,40 @@ fn test_generic_gate_pub_empty() { .prove_and_verify::() .unwrap(); } + +#[cfg(feature = "bn254")] +#[test] +fn test_generic_gate_pairing() { + type Fp = ark_bn254::Fr; + type SpongeParams = PlonkSpongeConstantsKimchi; + type BaseSponge = DefaultFqSponge; + type ScalarSponge = DefaultFrSponge; + + use ark_ff::UniformRand; + + let public = vec![Fp::from(3u8); 5]; + let gates = create_circuit(0, public.len()); + + let rng = &mut rand::rngs::OsRng; + let x = Fp::rand(rng); + + // create witness + let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![Fp::zero(); gates.len()]); + fill_in_witness(0, &mut witness, &public); + + // create and verify proof based on the witness + >, + > as Default>::default() + .gates(gates) + .witness(witness) + .public_inputs(public) + .setup_with_custom_srs(|d1, usize| { + let mut srs = poly_commitment::pairing_proof::PairingSRS::create(x, usize); + srs.full_srs.add_lagrange_basis(d1); + srs + }) + .prove_and_verify::() + .unwrap(); +} diff --git a/kimchi/src/tests/lookup.rs b/kimchi/src/tests/lookup.rs index 8fa4ac9723..a674eba78f 100644 --- a/kimchi/src/tests/lookup.rs +++ b/kimchi/src/tests/lookup.rs @@ -8,7 +8,7 @@ use crate::circuits::{ polynomial::COLUMNS, wires::Wire, }; -use ark_ff::Zero; +use ark_ff::{UniformRand, Zero}; use mina_curves::pasta::{Fp, Vesta, VestaParameters}; use mina_poseidon::{ constants::PlonkSpongeConstantsKimchi, @@ -126,6 +126,71 @@ fn lookup_gate_rejects_bad_lookups_multiple_tables() { 
setup_lookup_proof(false, 500, vec![100, 50, 50, 2, 2]) } +fn setup_successfull_runtime_table_test( + runtime_table_cfgs: Vec>, + runtime_tables: Vec>, + lookups: Vec, +) { + let mut rng = rand::thread_rng(); + let nb_lookups = lookups.len(); + + // circuit + let mut gates = vec![]; + for row in 0..nb_lookups { + gates.push(CircuitGate::new( + GateType::Lookup, + Wire::for_row(row), + vec![], + )); + } + + // witness + let witness = { + let mut cols: [_; COLUMNS] = array::from_fn(|_col| vec![Fp::zero(); gates.len()]); + + // only the first 7 registers are used in the lookup gate + let (lookup_cols, _rest) = cols.split_at_mut(7); + + for (i, table_id) in lookups.into_iter().enumerate() { + lookup_cols[0][i] = Fp::from(table_id); + let rt = runtime_table_cfgs + .clone() + .into_iter() + .find(|rt_cfg| rt_cfg.id == table_id) + .unwrap(); + let len_rt = rt.len(); + let first_column = rt.first_column; + let data = runtime_tables + .clone() + .into_iter() + .find(|rt| rt.id == table_id) + .unwrap() + .data; + + // create queries into our runtime lookup table. + // We will set [w1, w2], [w3, w4] and [w5, w6] to randon indexes and + // the corresponding values + let lookup_cols = &mut lookup_cols[1..]; + for chunk in lookup_cols.chunks_mut(2) { + let idx = rng.gen_range(0..len_rt); + chunk[0][i] = first_column[idx]; + chunk[1][i] = data[idx]; + } + } + cols + }; + + // run test + TestFramework::::default() + .gates(gates) + .witness(witness) + .runtime_tables_setup(runtime_table_cfgs) + .setup() + .runtime_tables(runtime_tables) + .prove_and_verify::() + .unwrap(); +} + #[test] fn test_runtime_table() { let num = 5; @@ -381,5 +446,130 @@ fn test_negative_test_runtime_table_prover_uses_undefined_id_in_index_and_witnes ); } -// TODO: add a test with a runtime table with ID 0 (it should panic) -// See https://github.com/MinaProtocol/mina/issues/13603 +#[test] +fn test_runtime_table_with_more_than_one_runtime_table_data_given_by_prover() { + let mut rng = rand::thread_rng(); + + let first_column = [0, 1, 2, 3, 4]; + let len = first_column.len(); + + let cfg = RuntimeTableCfg { + id: 1, + first_column: first_column.into_iter().map(Into::into).collect(), + }; + + /* We want to simulate this + table ID | idx | v | v2 + 1 | 0 | 0 | 42 + 1 | 1 | 2 | 32 + 1 | 2 | 4 | 22 + 1 | 3 | 5 | 12 + 1 | 4 | 4 | 2 + */ + + let data_v: Vec = [0u32, 2, 3, 4, 5].into_iter().map(Into::into).collect(); + let data_v2: Vec = [42, 32, 22, 12, 2].into_iter().map(Into::into).collect(); + let runtime_tables: Vec> = vec![ + RuntimeTable { + id: 1, + data: data_v.clone(), + }, + RuntimeTable { + id: 1, + data: data_v2, + }, + ]; + + // circuit + let mut gates = vec![]; + for row in 0..20 { + gates.push(CircuitGate::new( + GateType::Lookup, + Wire::for_row(row), + vec![], + )); + } + + // witness + let witness = { + let mut cols: [_; COLUMNS] = array::from_fn(|_col| vec![Fp::zero(); gates.len()]); + + // only the first 7 registers are used in the lookup gate + let (lookup_cols, _rest) = cols.split_at_mut(7); + + for row in 0..20 { + // the first register is the table id. + lookup_cols[0][row] = 1.into(); + + // create queries into our runtime lookup table. 
+ // We will set [w1, w2], [w3, w4] and [w5, w6] to randon indexes and + // the corresponding values + let lookup_cols = &mut lookup_cols[1..]; + for chunk in lookup_cols.chunks_mut(2) { + let idx = rng.gen_range(0..len); + chunk[0][row] = first_column[idx].into(); + chunk[1][row] = data_v[idx]; + } + } + cols + }; + + print_witness(&witness, 0, 20); + + // run test + let err = TestFramework::::default() + .gates(gates) + .witness(witness) + .runtime_tables_setup(vec![cfg]) + .setup() + .runtime_tables(runtime_tables) + .prove_and_verify::() + .unwrap_err(); + assert_eq!( + err, + "the runtime tables provided did not match the index's configuration" + ); +} + +#[test] +fn test_runtime_table_only_one_table_with_id_zero_with_non_zero_entries_fixed_values() { + let first_column = [0, 1, 2, 3, 4, 5]; + let table_id = 0; + + let cfg = RuntimeTableCfg { + id: table_id, + first_column: first_column.into_iter().map(Into::into).collect(), + }; + + let data: Vec = [0u32, 1, 2, 3, 4, 5].into_iter().map(Into::into).collect(); + let runtime_table = RuntimeTable { id: table_id, data }; + + let lookups: Vec = [0; 20].into(); + + setup_successfull_runtime_table_test(vec![cfg], vec![runtime_table], lookups); +} + +#[test] +fn test_runtime_table_only_one_table_with_id_zero_with_non_zero_entries_random_values() { + let mut rng = rand::thread_rng(); + + let len = rng.gen_range(1usize..1000); + let first_column: Vec = (0..len as i32).collect(); + + let table_id = 0; + + let cfg = RuntimeTableCfg { + id: table_id, + first_column: first_column.clone().into_iter().map(Into::into).collect(), + }; + + let data: Vec = first_column + .into_iter() + .map(|_| UniformRand::rand(&mut rng)) + .collect(); + let runtime_table = RuntimeTable { id: table_id, data }; + + let lookups: Vec = [0; 20].into(); + + setup_successfull_runtime_table_test(vec![cfg], vec![runtime_table], lookups); +} diff --git a/kimchi/src/tests/mod.rs b/kimchi/src/tests/mod.rs index 8e86206d3b..7ac97faedb 100644 --- a/kimchi/src/tests/mod.rs +++ b/kimchi/src/tests/mod.rs @@ -1,4 +1,5 @@ mod and; +mod chunked; mod ec; mod endomul; mod endomul_scalar; diff --git a/kimchi/src/tests/not.rs b/kimchi/src/tests/not.rs index 0979a12c61..42cd0705f7 100644 --- a/kimchi/src/tests/not.rs +++ b/kimchi/src/tests/not.rs @@ -23,6 +23,7 @@ use mina_poseidon::{ }; use num_bigint::BigUint; use o1_utils::{BigUintHelpers, BitwiseOps, FieldHelpers, RandomField}; +use poly_commitment::evaluation_proof::OpeningProof; use rand::{rngs::StdRng, SeedableRng}; type PallasField = ::BaseField; @@ -399,10 +400,17 @@ fn test_bad_not_gnrc() { }) ); witness[0][1] += PallasField::one(); - let index = - new_index_for_test_with_lookups(cs.gates, 1, 0, vec![xor::lookup_table()], None, false); + let index = new_index_for_test_with_lookups( + cs.gates, + 1, + 0, + vec![xor::lookup_table()], + None, + false, + None, + ); assert_eq!( - index.cs.gates[1].verify::(1, &witness, &index, &[]), + index.cs.gates[1].verify::>(1, &witness, &index, &[]), Err(("generic: incorrect gate").to_string()) ); } diff --git a/kimchi/src/tests/range_check.rs b/kimchi/src/tests/range_check.rs index 09d2528495..27240db942 100644 --- a/kimchi/src/tests/range_check.rs +++ b/kimchi/src/tests/range_check.rs @@ -38,6 +38,7 @@ use mina_poseidon::{ }; use poly_commitment::{ commitment::CommitmentCurve, + evaluation_proof::OpeningProof, srs::{endos, SRS}, }; @@ -53,7 +54,10 @@ const RNG_SEED: [u8; 32] = [ 0, 33, 210, 215, 172, 130, 24, 164, 12, ]; -fn create_test_prover_index(public_size: usize, compact: bool) -> ProverIndex 
{ +fn create_test_prover_index( + public_size: usize, + compact: bool, +) -> ProverIndex> { let (_next_row, gates) = if compact { CircuitGate::::create_compact_multi_range_check(0) } else { @@ -67,6 +71,7 @@ fn create_test_prover_index(public_size: usize, compact: bool) -> ProverIndex(); - ProverIndex::::create(cs, endo_q, srs) + ProverIndex::>::create(cs, endo_q, srs) }; // Witness layout (positive test case) @@ -1217,7 +1222,7 @@ fn verify_range_check_valid_proof1() { let verifier_index = prover_index.verifier_index(); // Verify proof - let res = verify::( + let res = verify::>( &group_map, &verifier_index, &proof, diff --git a/kimchi/src/tests/recursion.rs b/kimchi/src/tests/recursion.rs index 7936bb738e..d7f028acb5 100644 --- a/kimchi/src/tests/recursion.rs +++ b/kimchi/src/tests/recursion.rs @@ -11,7 +11,7 @@ use mina_poseidon::{ sponge::{DefaultFqSponge, DefaultFrSponge}, }; use o1_utils::math; -use poly_commitment::commitment::b_poly_coefficients; +use poly_commitment::{commitment::b_poly_coefficients, SRS as _}; use rand::prelude::*; use std::array; @@ -43,7 +43,7 @@ fn test_recursion() { let comm = { let coeffs = b_poly_coefficients(&chals); let b = DensePolynomial::from_coefficients_vec(coeffs); - index.srs.commit_non_hiding(&b, None) + index.srs.commit_non_hiding(&b, 1, None) }; RecursionChallenge::new(chals, comm) }; diff --git a/kimchi/src/tests/rot.rs b/kimchi/src/tests/rot.rs index 0a3ef74f02..c731884f7e 100644 --- a/kimchi/src/tests/rot.rs +++ b/kimchi/src/tests/rot.rs @@ -27,7 +27,10 @@ use mina_poseidon::{ FqSponge, }; use o1_utils::Two; -use poly_commitment::srs::{endos, SRS}; +use poly_commitment::{ + evaluation_proof::OpeningProof, + srs::{endos, SRS}, +}; use rand::{rngs::StdRng, Rng, SeedableRng}; type PallasField = ::BaseField; @@ -170,22 +173,20 @@ fn test_rot_random() { test_rot::(word, rot, RotMode::Right); } -#[should_panic] #[test] // Test that a bad rotation fails as expected fn test_zero_rot() { let rng = &mut StdRng::from_seed(RNG_SEED); let word = rng.gen_range(0..2u128.pow(64)) as u64; - create_rot_witness::(word, 0, RotMode::Left); + test_rot::(word, 0, RotMode::Left); } -#[should_panic] #[test] // Test that a bad rotation fails as expected fn test_large_rot() { let rng = &mut StdRng::from_seed(RNG_SEED); let word = rng.gen_range(0..2u128.pow(64)) as u64; - create_rot_witness::(word, 64, RotMode::Left); + test_rot::(word, 64, RotMode::Left); } #[test] @@ -362,7 +363,7 @@ fn test_rot_finalization() { let srs = Arc::new(srs); let (endo_q, _endo_r) = endos::(); - ProverIndex::::create(cs, endo_q, srs) + ProverIndex::>::create(cs, endo_q, srs) }; for row in 0..witness[0].len() { diff --git a/kimchi/src/tests/serde.rs b/kimchi/src/tests/serde.rs index 4de9369100..43883066d9 100644 --- a/kimchi/src/tests/serde.rs +++ b/kimchi/src/tests/serde.rs @@ -17,7 +17,7 @@ use mina_poseidon::{ constants::PlonkSpongeConstantsKimchi, sponge::{DefaultFqSponge, DefaultFrSponge}, }; -use poly_commitment::{commitment::CommitmentCurve, srs::SRS}; +use poly_commitment::{commitment::CommitmentCurve, evaluation_proof::OpeningProof, srs::SRS}; use std::array; use std::time::Instant; @@ -41,7 +41,8 @@ mod tests { println!("proof size: {} bytes", ser_pf.len()); // deserialize the proof - let de_pf: ProverProof = rmp_serde::from_slice(&ser_pf).unwrap(); + let de_pf: ProverProof> = + rmp_serde::from_slice(&ser_pf).unwrap(); // verify the deserialized proof (must accept the proof) ctx.batch_verification(&vec![(de_pf, public_input)]); @@ -72,7 +73,7 @@ mod tests { .unwrap(); // 
deserialize the verifier index - let mut verifier_index_deserialize: VerifierIndex> = + let mut verifier_index_deserialize: VerifierIndex, _> = serde_json::from_str(&verifier_index_serialize).unwrap(); // add srs with lagrange bases @@ -80,10 +81,11 @@ mod tests { srs.add_lagrange_basis(verifier_index.domain); verifier_index_deserialize.powers_of_alpha = index.powers_of_alpha; verifier_index_deserialize.linearization = index.linearization; + verifier_index_deserialize.srs = std::sync::Arc::new(srs); // verify the proof let start = Instant::now(); - verify::( + verify::>( &group_map, &verifier_index_deserialize, &proof, diff --git a/kimchi/src/tests/xor.rs b/kimchi/src/tests/xor.rs index 805b7a4d96..0344e0aea3 100644 --- a/kimchi/src/tests/xor.rs +++ b/kimchi/src/tests/xor.rs @@ -21,7 +21,10 @@ use mina_poseidon::{ }; use num_bigint::BigUint; use o1_utils::{BigUintHelpers, BitwiseOps, FieldHelpers, RandomField}; -use poly_commitment::srs::{endos, SRS}; +use poly_commitment::{ + evaluation_proof::OpeningProof, + srs::{endos, SRS}, +}; use rand::{rngs::StdRng, SeedableRng}; use super::framework::TestFramework; @@ -398,7 +401,7 @@ fn test_xor_finalization() { let srs = Arc::new(srs); let (endo_q, _endo_r) = endos::(); - ProverIndex::::create(cs, endo_q, srs) + ProverIndex::>::create(cs, endo_q, srs) }; for row in 0..witness[0].len() { diff --git a/kimchi/src/verifier.rs b/kimchi/src/verifier.rs index 1cd83b3ee7..f831b05cd3 100644 --- a/kimchi/src/verifier.rs +++ b/kimchi/src/verifier.rs @@ -3,10 +3,11 @@ use crate::{ circuits::{ argument::ArgumentType, + berkeley_columns::Column, constraints::ConstraintSystem, - expr::{Column, Constants, PolishToken}, + expr::{Constants, PolishToken}, gate::GateType, - lookup::tables::combine_table, + lookup::{lookups::LookupPattern, tables::combine_table}, polynomials::permutation, scalars::RandomOracles, wires::{COLUMNS, PERMUTS}, @@ -15,35 +16,37 @@ use crate::{ error::VerifyError, oracles::OraclesResult, plonk_sponge::FrSponge, - proof::{ - LookupEvaluations, PointEvaluations, ProofEvaluations, ProverProof, RecursionChallenge, - }, + proof::{PointEvaluations, ProofEvaluations, ProverProof, RecursionChallenge}, verifier_index::VerifierIndex, }; use ark_ec::AffineCurve; use ark_ff::{Field, One, PrimeField, Zero}; -use ark_poly::{EvaluationDomain, Polynomial}; +use ark_poly::{univariate::DensePolynomial, EvaluationDomain, Polynomial}; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; -use poly_commitment::commitment::{ - absorb_commitment, combined_inner_product, BatchEvaluationProof, Evaluation, PolyComm, +use o1_utils::ExtendedDensePolynomial; +use poly_commitment::{ + commitment::{ + absorb_commitment, combined_inner_product, BatchEvaluationProof, Evaluation, PolyComm, + }, + OpenProof, SRS as _, }; use rand::thread_rng; /// The result of a proof verification. 
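One consequence visible in the serde test above: since the SRS, `powers_of_alpha`, and `linearization` fields are `#[serde(skip)]`, a deserialized `VerifierIndex` has to be re-hydrated by hand, and the SRS is now assigned directly rather than set in a `OnceCell`. A condensed sketch, assuming the deserialize-side `Default` bound on the SRS holds:

```rust
use std::sync::Arc;

use kimchi::verifier_index::VerifierIndex;
use mina_curves::pasta::Vesta;
use poly_commitment::{evaluation_proof::OpeningProof, srs::SRS};

fn rehydrate(
    json: &str,
    srs: SRS<Vesta>,
    donor: &VerifierIndex<Vesta, OpeningProof<Vesta>>, // an index built in-process
) -> VerifierIndex<Vesta, OpeningProof<Vesta>> {
    let mut vi: VerifierIndex<Vesta, OpeningProof<Vesta>> =
        serde_json::from_str(json).expect("deserialize verifier index");
    // `#[serde(skip)]` fields come from the donor index...
    vi.powers_of_alpha = donor.powers_of_alpha.clone();
    vi.linearization = donor.linearization.clone();
    // ...and the SRS is attached directly (no `OnceCell::set` anymore).
    vi.srs = Arc::new(srs);
    vi
}
```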
pub type Result = std::result::Result; -pub struct Context<'a, G: KimchiCurve> { +pub struct Context<'a, G: KimchiCurve, OpeningProof: OpenProof> { /// The [VerifierIndex] associated to the proof - pub verifier_index: &'a VerifierIndex, + pub verifier_index: &'a VerifierIndex, /// The proof to verify - pub proof: &'a ProverProof, + pub proof: &'a ProverProof, /// The public input used in the creation of the proof pub public_input: &'a [G::ScalarField], } -impl<'a, G: KimchiCurve> Context<'a, G> { +impl<'a, G: KimchiCurve, OpeningProof: OpenProof> Context<'a, G, OpeningProof> { pub fn get_column(&self, col: Column) -> Option<&'a PolyComm> { use Column::*; match col { @@ -64,7 +67,7 @@ impl<'a, G: KimchiCurve> Context<'a, G> { .runtime_tables_selector .as_ref()?, ), - LookupRuntimeTable => None, + LookupRuntimeTable => self.proof.commitments.lookup.as_ref()?.runtime.as_ref(), Index(t) => { use GateType::*; match t { @@ -91,7 +94,7 @@ impl<'a, G: KimchiCurve> Context<'a, G> { } } -impl ProverProof +impl> ProverProof where G::BaseField: PrimeField, { @@ -109,9 +112,9 @@ where EFrSponge: FrSponge, >( &self, - index: &VerifierIndex, + index: &VerifierIndex, public_comm: &PolyComm, - public_input: &[G::ScalarField], + public_input: Option<&[G::ScalarField]>, ) -> Result> { //~ //~ #### Fiat-Shamir argument @@ -121,8 +124,19 @@ where let n = index.domain.size; let (_, endo_r) = G::endos(); + let chunk_size = { + let d1_size = index.domain.size(); + if d1_size < index.max_poly_size { + 1 + } else { + d1_size / index.max_poly_size + } + }; + + let zk_rows = index.zk_rows; + //~ 1. Setup the Fq-Sponge. - let mut fq_sponge = EFqSponge::new(G::OtherCurve::sponge_params()); + let mut fq_sponge = EFqSponge::new(G::other_curve_sponge_params()); //~ 1. Absorb the digest of the VerifierIndex. let verifier_index_digest = index.digest::(); @@ -214,9 +228,13 @@ where //~ 1. Derive $\alpha$ from $\alpha'$ using the endomorphism (TODO: details). let alpha = alpha_chal.to_field(endo_r); - //~ 1. Enforce that the length of the $t$ commitment is of size `PERMUTS`. - if self.commitments.t_comm.unshifted.len() != PERMUTS { - return Err(VerifyError::IncorrectCommitmentLength("t")); + //~ 1. Enforce that the length of the $t$ commitment is of size 7. + if self.commitments.t_comm.unshifted.len() > chunk_size * 7 { + return Err(VerifyError::IncorrectCommitmentLength( + "t", + chunk_size * 7, + self.commitments.t_comm.unshifted.len(), + )); } //~ 1. Absorb the commitment to the quotient polynomial $t$ into the argument. 
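The `chunk_size` computed above drives everything that follows: every chunked commitment and evaluation carries `chunk_size` parts, and since the quotient $t$ spans up to 7 polynomial segments, its commitment is bounded by `7 * chunk_size` parts (the "of size 7" wording in the spec comment is the unchunked special case). A worked instance with the sizes from the chunked test earlier in this diff:

```rust
// Mirrors the verifier's computation above.
fn chunk_size(d1_size: usize, max_poly_size: usize) -> usize {
    if d1_size < max_poly_size {
        1
    } else {
        d1_size / max_poly_size
    }
}

fn main() {
    assert_eq!(chunk_size(1 << 17, 1 << 16), 2); // domain 2^17, SRS 2^16: two chunks
    assert_eq!(chunk_size(1 << 10, 1 << 16), 1); // small circuits stay unchunked
    assert_eq!(7 * chunk_size(1 << 17, 1 << 16), 14); // bound on t's unshifted parts
}
```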
@@ -278,45 +296,53 @@ where let mut all_alphas = index.powers_of_alpha.clone(); all_alphas.instantiate(alpha); - // compute Lagrange base evaluation denominators - let w: Vec<_> = index.domain.elements().take(public_input.len()).collect(); + let public_evals = if let Some(public_evals) = &self.evals.public { + [public_evals.zeta.clone(), public_evals.zeta_omega.clone()] + } else if chunk_size > 1 { + return Err(VerifyError::MissingPublicInputEvaluation); + } else if let Some(public_input) = public_input { + // compute Lagrange base evaluation denominators + let w: Vec<_> = index.domain.elements().take(public_input.len()).collect(); - let mut zeta_minus_x: Vec<_> = w.iter().map(|w| zeta - w).collect(); + let mut zeta_minus_x: Vec<_> = w.iter().map(|w| zeta - w).collect(); - w.iter() - .take(public_input.len()) - .for_each(|w| zeta_minus_x.push(zetaw - w)); + w.iter() + .take(public_input.len()) + .for_each(|w| zeta_minus_x.push(zetaw - w)); - ark_ff::fields::batch_inversion::(&mut zeta_minus_x); + ark_ff::fields::batch_inversion::(&mut zeta_minus_x); - //~ 1. Evaluate the negated public polynomial (if present) at $\zeta$ and $\zeta\omega$. - //~ - //~ NOTE: this works only in the case when the poly segment size is not smaller than that of the domain. - let public_evals = if public_input.is_empty() { - [vec![G::ScalarField::zero()], vec![G::ScalarField::zero()]] + //~ 1. Evaluate the negated public polynomial (if present) at $\zeta$ and $\zeta\omega$. + //~ + //~ NOTE: this works only in the case when the poly segment size is not smaller than that of the domain. + if public_input.is_empty() { + [vec![G::ScalarField::zero()], vec![G::ScalarField::zero()]] + } else { + [ + vec![ + (public_input + .iter() + .zip(zeta_minus_x.iter()) + .zip(index.domain.elements()) + .map(|((p, l), w)| -*l * p * w) + .fold(G::ScalarField::zero(), |x, y| x + y)) + * (zeta1 - G::ScalarField::one()) + * index.domain.size_inv, + ], + vec![ + (public_input + .iter() + .zip(zeta_minus_x[public_input.len()..].iter()) + .zip(index.domain.elements()) + .map(|((p, l), w)| -*l * p * w) + .fold(G::ScalarField::zero(), |x, y| x + y)) + * index.domain.size_inv + * (zetaw.pow([n]) - G::ScalarField::one()), + ], + ] + } } else { - [ - vec![ - (public_input - .iter() - .zip(zeta_minus_x.iter()) - .zip(index.domain.elements()) - .map(|((p, l), w)| -*l * p * w) - .fold(G::ScalarField::zero(), |x, y| x + y)) - * (zeta1 - G::ScalarField::one()) - * index.domain.size_inv, - ], - vec![ - (public_input - .iter() - .zip(zeta_minus_x[public_input.len()..].iter()) - .zip(index.domain.elements()) - .map(|((p, l), w)| -*l * p * w) - .fold(G::ScalarField::zero(), |x, y| x + y)) - * index.domain.size_inv - * (zetaw.pow([n]) - G::ScalarField::one()), - ], - ] + return Err(VerifyError::MissingPublicInputEvaluation); }; //~ 1. Absorb the unique evaluation of ft: $ft(\zeta\omega)$. @@ -351,7 +377,8 @@ where //~ 1. Compute the evaluation of $ft(\zeta)$. 
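For reference, the public-input branch above is a barycentric Lagrange evaluation over the $n$-th roots of unity: it computes $-\sum_i p_i L_i(\zeta)$ with $L_i(\zeta) = \frac{\omega^i(\zeta^n - 1)}{n(\zeta - \omega^i)}$, where `zeta_minus_x` holds the batch-inverted $(\zeta - \omega^i)$ (and $(\zeta\omega - \omega^i)$) denominators, each summand contributes $-p_i\,\omega^i/(\zeta - \omega^i)$, and the shared factor $(\zeta^n - 1)/n$ is applied once at the end. When the proof is chunked (`chunk_size > 1`) the verifier cannot recompute these values itself, which is why they must ship in `self.evals.public` and their absence is a `MissingPublicInputEvaluation` error.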
let ft_eval0 = { - let zkp = index.zkpm().evaluate(&zeta); + let permutation_vanishing_polynomial = + index.permutation_vanishing_polynomial_m().evaluate(&zeta); let zeta1m1 = zeta1 - G::ScalarField::one(); let mut alpha_powers = @@ -366,7 +393,10 @@ where .next() .expect("missing power of alpha for permutation"); - let init = (evals.w[PERMUTS - 1].zeta + gamma) * evals.z.zeta_omega * alpha0 * zkp; + let init = (evals.w[PERMUTS - 1].zeta + gamma) + * evals.z.zeta_omega + * alpha0 + * permutation_vanishing_polynomial; let mut ft_eval0 = evals .w .iter() @@ -374,18 +404,20 @@ where .map(|(w, s)| (beta * s.zeta) + w.zeta + gamma) .fold(init, |x, y| x * y); - ft_eval0 -= if public_evals[0].is_empty() { - G::ScalarField::zero() - } else { - public_evals[0][0] - }; + ft_eval0 -= DensePolynomial::eval_polynomial( + &public_evals[0], + powers_of_eval_points_for_chunks.zeta, + ); ft_eval0 -= evals .w .iter() .zip(index.shift.iter()) .map(|(w, s)| gamma + (beta * zeta * s) + w.zeta) - .fold(alpha0 * zkp * evals.z.zeta, |x, y| x * y); + .fold( + alpha0 * permutation_vanishing_polynomial * evals.z.zeta, + |x, y| x * y, + ); let numerator = ((zeta1m1 * alpha1 * (zeta - index.w())) + (zeta1m1 * alpha2 * (zeta - G::ScalarField::one()))) @@ -403,6 +435,7 @@ where joint_combiner: joint_combiner.as_ref().map(|j| j.1), endo_coefficient: index.endo, mds: &G::sponge_params().mds, + zk_rows, }; ft_eval0 -= PolishToken::evaluate( @@ -417,57 +450,124 @@ where ft_eval0 }; - let combined_inner_product = { - let ft_eval0 = vec![ft_eval0]; - let ft_eval1 = vec![self.ft_eval1]; - - #[allow(clippy::type_complexity)] - let mut es: Vec<(Vec>, Option)> = - polys.iter().map(|(_, e)| (e.clone(), None)).collect(); - es.push((public_evals.to_vec(), None)); - es.push((vec![ft_eval0, ft_eval1], None)); - for col in [ - Column::Z, - Column::Index(GateType::Generic), - Column::Index(GateType::Poseidon), - ] - .into_iter() - .chain((0..COLUMNS).map(Column::Witness)) - .chain((0..COLUMNS).map(Column::Coefficient)) - .chain((0..PERMUTS - 1).map(Column::Permutation)) - .chain( - index - .lookup_index - .as_ref() - .map(|li| { - (0..li.lookup_info.max_per_row + 1) - .map(Column::LookupSorted) - .chain([Column::LookupAggreg, Column::LookupTable].into_iter()) - .chain( - li.runtime_tables_selector - .as_ref() - .map(|_| [Column::LookupRuntimeTable].into_iter()) - .into_iter() - .flatten(), - ) - }) - .into_iter() - .flatten(), - ) { - es.push(( - { - let evals = self - .evals - .get_column(col) - .ok_or(VerifyError::MissingEvaluation(col))?; - vec![evals.zeta.clone(), evals.zeta_omega.clone()] - }, - None, - )) - } + let combined_inner_product = + { + let ft_eval0 = vec![ft_eval0]; + let ft_eval1 = vec![self.ft_eval1]; + + #[allow(clippy::type_complexity)] + let mut es: Vec<(Vec>, Option)> = + polys.iter().map(|(_, e)| (e.clone(), None)).collect(); + es.push((public_evals.to_vec(), None)); + es.push((vec![ft_eval0, ft_eval1], None)); + for col in [ + Column::Z, + Column::Index(GateType::Generic), + Column::Index(GateType::Poseidon), + Column::Index(GateType::CompleteAdd), + Column::Index(GateType::VarBaseMul), + Column::Index(GateType::EndoMul), + Column::Index(GateType::EndoMulScalar), + ] + .into_iter() + .chain((0..COLUMNS).map(Column::Witness)) + .chain((0..COLUMNS).map(Column::Coefficient)) + .chain((0..PERMUTS - 1).map(Column::Permutation)) + .chain( + index + .range_check0_comm + .as_ref() + .map(|_| Column::Index(GateType::RangeCheck0)), + ) + .chain( + index + .range_check1_comm + .as_ref() + .map(|_| 
Column::Index(GateType::RangeCheck1)), + ) + .chain( + index + .foreign_field_add_comm + .as_ref() + .map(|_| Column::Index(GateType::ForeignFieldAdd)), + ) + .chain( + index + .foreign_field_mul_comm + .as_ref() + .map(|_| Column::Index(GateType::ForeignFieldMul)), + ) + .chain( + index + .xor_comm + .as_ref() + .map(|_| Column::Index(GateType::Xor16)), + ) + .chain( + index + .rot_comm + .as_ref() + .map(|_| Column::Index(GateType::Rot64)), + ) + .chain( + index + .lookup_index + .as_ref() + .map(|li| { + (0..li.lookup_info.max_per_row + 1) + .map(Column::LookupSorted) + .chain([Column::LookupAggreg, Column::LookupTable].into_iter()) + .chain( + li.runtime_tables_selector + .as_ref() + .map(|_| [Column::LookupRuntimeTable].into_iter()) + .into_iter() + .flatten(), + ) + .chain( + self.evals + .runtime_lookup_table_selector + .as_ref() + .map(|_| Column::LookupRuntimeSelector), + ) + .chain( + self.evals + .xor_lookup_selector + .as_ref() + .map(|_| Column::LookupKindIndex(LookupPattern::Xor)), + ) + .chain( + self.evals + .lookup_gate_lookup_selector + .as_ref() + .map(|_| Column::LookupKindIndex(LookupPattern::Lookup)), + ) + .chain( + self.evals.range_check_lookup_selector.as_ref().map(|_| { + Column::LookupKindIndex(LookupPattern::RangeCheck) + }), + ) + .chain(self.evals.foreign_field_mul_lookup_selector.as_ref().map( + |_| Column::LookupKindIndex(LookupPattern::ForeignFieldMul), + )) + }) + .into_iter() + .flatten(), + ) { + es.push(( + { + let evals = self + .evals + .get_column(col) + .ok_or(VerifyError::MissingEvaluation(col))?; + vec![evals.zeta.clone(), evals.zeta_omega.clone()] + }, + None, + )) + } - combined_inner_product(&evaluation_points, &v, &u, &es, index.srs().g.len()) - }; + combined_inner_product(&evaluation_points, &v, &u, &es, index.srs().max_poly_size()) + }; let oracles = RandomOracles { joint_combiner, @@ -501,66 +601,151 @@ where /// Enforce the length of evaluations inside [`Proof`]. /// Atm, the length of evaluations(both `zeta` and `zeta_omega`) SHOULD be 1. /// The length value is prone to future change. 
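The change the doc comment above anticipated has landed: with chunking, each point evaluation is a vector of `chunk_size` field elements, so the rewritten check below takes an `expected_size` instead of hard-coding 1. It also reflects the flattening of `LookupEvaluations` into top-level optional fields. An assumption-laden sketch of the new shape (field names taken from the destructuring below; container types are illustrative):

```rust
#[allow(dead_code)]
struct ProofEvaluationsSketch<Evals> {
    /// New: evaluations of the (negated) public polynomial; required once the
    /// proof is chunked, since the verifier can no longer recompute them.
    public: Option<Evals>,
    // Lookup evaluations, previously nested inside `LookupEvaluations`:
    lookup_aggregation: Option<Evals>,
    lookup_table: Option<Evals>,
    lookup_sorted: Vec<Option<Evals>>, // shape assumed; walked with `.iter().flatten()`
    runtime_lookup_table: Option<Evals>,
    // ...plus one `Option<Evals>` per optional gate and lookup-pattern selector.
}
```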
-fn check_proof_evals_len(proof: &ProverProof) -> Result<()> +fn check_proof_evals_len( + proof: &ProverProof, + expected_size: usize, +) -> Result<()> where G: KimchiCurve, G::BaseField: PrimeField, { let ProofEvaluations { + public, w, z, s, coefficients, - lookup, generic_selector, poseidon_selector, + complete_add_selector, + mul_selector, + emul_selector, + endomul_scalar_selector, + range_check0_selector, + range_check1_selector, + foreign_field_add_selector, + foreign_field_mul_selector, + xor_selector, + rot_selector, + lookup_aggregation, + lookup_table, + lookup_sorted, + runtime_lookup_table, + runtime_lookup_table_selector, + xor_lookup_selector, + lookup_gate_lookup_selector, + range_check_lookup_selector, + foreign_field_mul_lookup_selector, } = &proof.evals; - let check_eval_len = |eval: &PointEvaluations>| -> Result<()> { - if eval.zeta.len().is_one() && eval.zeta_omega.len().is_one() { - Ok(()) + let check_eval_len = |eval: &PointEvaluations>, str: &'static str| -> Result<()> { + if eval.zeta.len() != expected_size { + Err(VerifyError::IncorrectEvaluationsLength( + expected_size, + eval.zeta.len(), + str, + )) + } else if eval.zeta_omega.len() != expected_size { + Err(VerifyError::IncorrectEvaluationsLength( + expected_size, + eval.zeta_omega.len(), + str, + )) } else { - Err(VerifyError::IncorrectEvaluationsLength) + Ok(()) } }; + if let Some(public) = public { + check_eval_len(public, "public input")?; + } + for w_i in w { - check_eval_len(w_i)?; + check_eval_len(w_i, "witness")?; } - check_eval_len(z)?; + check_eval_len(z, "permutation accumulator")?; for s_i in s { - check_eval_len(s_i)?; + check_eval_len(s_i, "permutation shifts")?; } for coeff in coefficients { - check_eval_len(coeff)?; + check_eval_len(coeff, "coefficients")?; } - if let Some(LookupEvaluations { - sorted, - aggreg, - table, - runtime, - }) = lookup - { - for sorted_i in sorted { - check_eval_len(sorted_i)?; - } - check_eval_len(aggreg)?; - check_eval_len(table)?; - if let Some(runtime) = &runtime { - check_eval_len(runtime)?; - } + + // Lookup evaluations + for sorted in lookup_sorted.iter().flatten() { + check_eval_len(sorted, "lookup sorted")? + } + + if let Some(lookup_aggregation) = lookup_aggregation { + check_eval_len(lookup_aggregation, "lookup aggregation")?; + } + if let Some(lookup_table) = lookup_table { + check_eval_len(lookup_table, "lookup table")?; + } + if let Some(runtime_lookup_table) = runtime_lookup_table { + check_eval_len(runtime_lookup_table, "runtime lookup table")?; + } + + check_eval_len(generic_selector, "generic selector")?; + check_eval_len(poseidon_selector, "poseidon selector")?; + check_eval_len(complete_add_selector, "complete add selector")?; + check_eval_len(mul_selector, "mul selector")?; + check_eval_len(emul_selector, "endomul selector")?; + check_eval_len(endomul_scalar_selector, "endomul scalar selector")?; + + // Optional gates + + if let Some(range_check0_selector) = range_check0_selector { + check_eval_len(range_check0_selector, "range check 0 selector")? + } + if let Some(range_check1_selector) = range_check1_selector { + check_eval_len(range_check1_selector, "range check 1 selector")? + } + if let Some(foreign_field_add_selector) = foreign_field_add_selector { + check_eval_len(foreign_field_add_selector, "foreign field add selector")? + } + if let Some(foreign_field_mul_selector) = foreign_field_mul_selector { + check_eval_len(foreign_field_mul_selector, "foreign field mul selector")? 
+ } + if let Some(xor_selector) = xor_selector { + check_eval_len(xor_selector, "xor selector")? + } + if let Some(rot_selector) = rot_selector { + check_eval_len(rot_selector, "rot selector")? + } + + // Lookup selectors + + if let Some(runtime_lookup_table_selector) = runtime_lookup_table_selector { + check_eval_len( + runtime_lookup_table_selector, + "runtime lookup table selector", + )? + } + if let Some(xor_lookup_selector) = xor_lookup_selector { + check_eval_len(xor_lookup_selector, "xor lookup selector")? + } + if let Some(lookup_gate_lookup_selector) = lookup_gate_lookup_selector { + check_eval_len(lookup_gate_lookup_selector, "lookup gate lookup selector")? + } + if let Some(range_check_lookup_selector) = range_check_lookup_selector { + check_eval_len(range_check_lookup_selector, "range check lookup selector")? + } + if let Some(foreign_field_mul_lookup_selector) = foreign_field_mul_lookup_selector { + check_eval_len( + foreign_field_mul_lookup_selector, + "foreign field mul lookup selector", + )? } - check_eval_len(generic_selector)?; - check_eval_len(poseidon_selector)?; Ok(()) } -fn to_batch<'a, G, EFqSponge, EFrSponge>( - verifier_index: &VerifierIndex, - proof: &'a ProverProof, +fn to_batch<'a, G, EFqSponge, EFrSponge, OpeningProof: OpenProof>( + verifier_index: &VerifierIndex, + proof: &'a ProverProof, public_input: &'a [::ScalarField], -) -> Result> +) -> Result> where G: KimchiCurve, G::BaseField: PrimeField, @@ -575,6 +760,8 @@ where //~ Essentially, this steps verifies that $f(\zeta) = t(\zeta) * Z_H(\zeta)$. //~ + let zk_rows = verifier_index.zk_rows; + if proof.prev_challenges.len() != verifier_index.prev_challenges { return Err(VerifyError::IncorrectPrevChallengesLength( verifier_index.prev_challenges, @@ -588,7 +775,15 @@ where } //~ 1. Check the length of evaluations inside the proof. - check_proof_evals_len(proof)?; + let chunk_size = { + let d1_size = verifier_index.domain.size(); + if d1_size < verifier_index.max_poly_size { + 1 + } else { + d1_size / verifier_index.max_poly_size + } + }; + check_proof_evals_len(proof, chunk_size)?; //~ 1. Commit to the negated public input polynomial. let public_comm = { @@ -599,23 +794,26 @@ where } let lgr_comm = verifier_index .srs() - .lagrange_bases - .get(&verifier_index.domain.size()) + .get_lagrange_basis(verifier_index.domain.size()) .expect("pre-computed committed lagrange bases not found"); let com: Vec<_> = lgr_comm.iter().take(verifier_index.public).collect(); - let elm: Vec<_> = public_input.iter().map(|s| -*s).collect(); - let public_comm = PolyComm::::multi_scalar_mul(&com, &elm); - verifier_index - .srs() - .mask_custom( - public_comm, - &PolyComm { - unshifted: vec![G::ScalarField::one(); 1], - shifted: None, - }, + if public_input.is_empty() { + PolyComm::new( + vec![verifier_index.srs().blinding_commitment(); chunk_size], + None, ) - .unwrap() - .commitment + } else { + let elm: Vec<_> = public_input.iter().map(|s| -*s).collect(); + let public_comm = PolyComm::::multi_scalar_mul(&com, &elm); + verifier_index + .srs() + .mask_custom( + public_comm.clone(), + &public_comm.map(|_| G::ScalarField::one()), + ) + .unwrap() + .commitment + } }; //~ 1. Run the [Fiat-Shamir argument](#fiat-shamir-argument). @@ -630,7 +828,7 @@ where ft_eval0, combined_inner_product, .. - } = proof.oracles::(verifier_index, &public_comm, public_input)?; + } = proof.oracles::(verifier_index, &public_comm, Some(public_input))?; //~ 1. 
Combine the chunked polynomials' evaluations //~ (TODO: most likely only the quotient polynomial is chunked) @@ -653,7 +851,9 @@ where //~ in which case the evaluation should be used in place of the commitment. let f_comm = { // the permutation is written manually (not using the expr framework) - let zkp = verifier_index.zkpm().evaluate(&oracles.zeta); + let permutation_vanishing_polynomial = verifier_index + .permutation_vanishing_polynomial_m() + .evaluate(&oracles.zeta); let alphas = all_alphas.get_alphas(ArgumentType::Permutation, permutation::CONSTRAINTS); @@ -663,7 +863,7 @@ where oracles.beta, oracles.gamma, alphas, - zkp, + permutation_vanishing_polynomial, )]; // other gates are implemented using the expression framework @@ -676,6 +876,7 @@ where joint_combiner: oracles.joint_combiner.as_ref().map(|j| j.1), endo_coefficient: verifier_index.endo, mds: &G::sponge_params().mds, + zk_rows, }; for (col, tokens) in &verifier_index.linearization.index_terms { @@ -742,6 +943,10 @@ where //~~ * index commitments that use the coefficients Column::Index(GateType::Generic), Column::Index(GateType::Poseidon), + Column::Index(GateType::CompleteAdd), + Column::Index(GateType::VarBaseMul), + Column::Index(GateType::EndoMul), + Column::Index(GateType::EndoMulScalar), ] .into_iter() //~~ * witness commitments @@ -750,6 +955,43 @@ where .chain((0..COLUMNS).map(Column::Coefficient)) //~~ * sigma commitments .chain((0..PERMUTS - 1).map(Column::Permutation)) + //~~ * optional gate commitments + .chain( + verifier_index + .range_check0_comm + .as_ref() + .map(|_| Column::Index(GateType::RangeCheck0)), + ) + .chain( + verifier_index + .range_check1_comm + .as_ref() + .map(|_| Column::Index(GateType::RangeCheck1)), + ) + .chain( + verifier_index + .foreign_field_add_comm + .as_ref() + .map(|_| Column::Index(GateType::ForeignFieldAdd)), + ) + .chain( + verifier_index + .foreign_field_mul_comm + .as_ref() + .map(|_| Column::Index(GateType::ForeignFieldMul)), + ) + .chain( + verifier_index + .xor_comm + .as_ref() + .map(|_| Column::Index(GateType::Xor16)), + ) + .chain( + verifier_index + .rot_comm + .as_ref() + .map(|_| Column::Index(GateType::Rot64)), + ) //~~ * lookup commitments //~ .chain( @@ -786,11 +1028,13 @@ where .lookup .as_ref() .ok_or(VerifyError::LookupCommitmentMissing)?; - let lookup_eval = proof + + let lookup_table = proof .evals - .lookup + .lookup_table .as_ref() .ok_or(VerifyError::LookupEvalsMissing)?; + let runtime_lookup_table = proof.evals.runtime_lookup_table.as_ref(); // compute table commitment let table_comm = { @@ -815,10 +1059,7 @@ where // add evaluation of the table polynomial evaluations.push(Evaluation { commitment: table_comm, - evaluations: vec![ - lookup_eval.table.zeta.clone(), - lookup_eval.table.zeta_omega.clone(), - ], + evaluations: vec![lookup_table.zeta.clone(), lookup_table.zeta_omega.clone()], degree_bound: None, }); @@ -828,8 +1069,7 @@ where .runtime .as_ref() .ok_or(VerifyError::IncorrectRuntimeProof)?; - let runtime_eval = lookup_eval - .runtime + let runtime_eval = runtime_lookup_table .as_ref() .map(|x| x.map_ref(&|x| x.clone())) .ok_or(VerifyError::IncorrectRuntimeProof)?; @@ -842,6 +1082,56 @@ where } } + for col in verifier_index + .lookup_index + .as_ref() + .map(|li| { + (li.runtime_tables_selector + .as_ref() + .map(|_| Column::LookupRuntimeSelector)) + .into_iter() + .chain( + li.lookup_selectors + .xor + .as_ref() + .map(|_| Column::LookupKindIndex(LookupPattern::Xor)), + ) + .chain( + li.lookup_selectors + .lookup + .as_ref() + .map(|_| 
Column::LookupKindIndex(LookupPattern::Lookup)), + ) + .chain( + li.lookup_selectors + .range_check + .as_ref() + .map(|_| Column::LookupKindIndex(LookupPattern::RangeCheck)), + ) + .chain( + li.lookup_selectors + .ffmul + .as_ref() + .map(|_| Column::LookupKindIndex(LookupPattern::ForeignFieldMul)), + ) + }) + .into_iter() + .flatten() + { + let evals = proof + .evals + .get_column(col) + .ok_or(VerifyError::MissingEvaluation(col))?; + evaluations.push(Evaluation { + commitment: context + .get_column(col) + .ok_or(VerifyError::MissingCommitment(col))? + .clone(), + evaluations: vec![evals.zeta.clone(), evals.zeta_omega.clone()], + degree_bound: None, + }); + } + // prepare for the opening proof verification let evaluation_points = vec![oracles.zeta, oracles.zeta * verifier_index.domain.group_gen]; Ok(BatchEvaluationProof { @@ -860,10 +1150,10 @@ where /// # Errors /// /// Will give error if `proof(s)` are not verified as valid. -pub fn verify( +pub fn verify>( group_map: &G::Map, - verifier_index: &VerifierIndex, - proof: &ProverProof, + verifier_index: &VerifierIndex, + proof: &ProverProof, public_input: &[G::ScalarField], ) -> Result<()> where @@ -877,7 +1167,7 @@ where proof, public_input, }]; - batch_verify::(group_map, &proofs) + batch_verify::(group_map, &proofs) } /// This function verifies the batch of zk-proofs @@ -887,9 +1177,9 @@ where /// # Errors /// /// Will give error if `srs` of `proof` is invalid or `verify` process fails. -pub fn batch_verify( +pub fn batch_verify>( group_map: &G::Map, - proofs: &[Context], + proofs: &[Context], ) -> Result<()> where G: KimchiCurve, @@ -913,14 +1203,9 @@ where // TODO: Account for the different SRS lengths let srs = proofs[0].verifier_index.srs(); for &Context { verifier_index, .. } in proofs { - if verifier_index.srs().g.len() != srs.g.len() { + if verifier_index.srs().max_poly_size() != srs.max_poly_size() { return Err(VerifyError::DifferentSRS); } - - // also make sure that the SRS is not smaller than the domain size - if verifier_index.srs().max_degree() < verifier_index.domain.size() { - return Err(VerifyError::SRSTooSmall); - } } //~ 1. Validate each proof separately following the [partial verification](#partial-verification) steps. @@ -931,7 +1216,7 @@ where public_input, } in proofs { - batch.push(to_batch::( + batch.push(to_batch::( verifier_index, proof, public_input, @@ -939,7 +1224,7 @@ where } //~ 1. Use the [`PolyCom.verify`](#polynomial-commitments) to verify the partially evaluated proofs. 
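`verify` and `batch_verify` are generic over the opening proof as well, and the final check dispatches through the trait (`OpeningProof::verify`) rather than the inherent `srs.verify` — see the hunk just below. A hedged sketch of the updated call shape, matching the turbofish used by the range-check test earlier in this diff (construction of the index, proof, and public input elided):

```rust
use groupmap::GroupMap;
use kimchi::verifier::verify;
use mina_curves::pasta::{Fp, Vesta, VestaParameters};
use mina_poseidon::{
    constants::PlonkSpongeConstantsKimchi,
    sponge::{DefaultFqSponge, DefaultFrSponge},
};
use poly_commitment::{commitment::CommitmentCurve, evaluation_proof::OpeningProof};

type BaseSponge = DefaultFqSponge<VestaParameters, PlonkSpongeConstantsKimchi>;
type ScalarSponge = DefaultFrSponge<Fp, PlonkSpongeConstantsKimchi>;

fn check(
    verifier_index: &kimchi::verifier_index::VerifierIndex<Vesta, OpeningProof<Vesta>>,
    proof: &kimchi::proof::ProverProof<Vesta, OpeningProof<Vesta>>,
    public_input: &[Fp],
) -> kimchi::verifier::Result<()> {
    let group_map = <Vesta as CommitmentCurve>::Map::setup();
    verify::<Vesta, BaseSponge, ScalarSponge, OpeningProof<Vesta>>(
        &group_map,
        verifier_index,
        proof,
        public_input,
    )
}
```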
- if srs.verify::(group_map, &mut batch, &mut thread_rng()) { + if OpeningProof::verify(srs, group_map, &mut batch, &mut thread_rng()) { Ok(()) } else { Err(VerifyError::OpenProof) diff --git a/kimchi/src/verifier_index.rs b/kimchi/src/verifier_index.rs index 1163885f0d..ff4aaca48a 100644 --- a/kimchi/src/verifier_index.rs +++ b/kimchi/src/verifier_index.rs @@ -4,13 +4,13 @@ use crate::{ alphas::Alphas, circuits::{ + berkeley_columns::Column, expr::{Linearization, PolishToken}, lookup::{index::LookupSelectors, lookups::LookupInfo}, - polynomials::permutation::{zk_polynomial, zk_w3}, + polynomials::permutation::{vanishes_on_last_n_rows, zk_w}, wires::{COLUMNS, PERMUTS}, }, curve::KimchiCurve, - error::VerifierIndexError, prover_index::ProverIndex, }; use ark_ff::{One, PrimeField}; @@ -19,7 +19,7 @@ use mina_poseidon::FqSponge; use once_cell::sync::OnceCell; use poly_commitment::{ commitment::{CommitmentCurve, PolyComm}, - srs::SRS, + OpenProof, SRS as _, }; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_with::serde_as; @@ -56,15 +56,18 @@ pub struct LookupVerifierIndex { #[serde_as] #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct VerifierIndex { +pub struct VerifierIndex> { /// evaluation domain #[serde_as(as = "o1_utils::serialization::SerdeAs")] pub domain: D, /// maximal size of polynomial section pub max_poly_size: usize, + /// the number of randomized rows to achieve zero knowledge + pub zk_rows: u64, /// polynomial commitment keys #[serde(skip)] - pub srs: OnceCell>>, + #[serde(bound(deserialize = "OpeningProof::SRS: Default"))] + pub srs: Arc, /// number of public inputs pub public: usize, /// number of previous evaluation challenges, for recursive proving @@ -129,7 +132,7 @@ pub struct VerifierIndex { pub shift: [G::ScalarField; PERMUTS], /// zero-knowledge polynomial #[serde(skip)] - pub zkpm: OnceCell>, + pub permutation_vanishing_polynomial_m: OnceCell>, // TODO(mimoo): isn't this redundant with domain.d1.group_gen ? /// domain offset for zero-knowledge #[serde(skip)] @@ -142,20 +145,26 @@ pub struct VerifierIndex { pub lookup_index: Option>, #[serde(skip)] - pub linearization: Linearization>>, + pub linearization: Linearization>, Column>, /// The mapping between powers of alpha and constraints #[serde(skip)] pub powers_of_alpha: Alphas, } //~spec:endcode -impl ProverIndex { +impl> ProverIndex +where + G::BaseField: PrimeField, +{ /// Produces the [`VerifierIndex`] from the prover's [`ProverIndex`]. /// /// # Panics /// /// Will panic if `srs` cannot be in `cell`. 
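The `zkpm` → `permutation_vanishing_polynomial_m` rename in the struct above tracks the generalization from the hard-coded 3 zero-knowledge rows to a configurable `zk_rows`. Judging from the call sites in this diff (`vanishes_on_last_n_rows(domain, zk_rows)` and `zk_w(domain, zk_rows)` further below), for a domain of size $n$ with generator $\omega$ the polynomial is $\prod_{i=n-\mathtt{zk\_rows}}^{n-1}(x - \omega^i)$, vanishing on the last `zk_rows` rows, and `w()` returns the first excluded point $\omega^{n-\mathtt{zk\_rows}}$; the old `zk_polynomial`/`zk_w3` pair was the $\mathtt{zk\_rows} = 3$ special case.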
- pub fn verifier_index(&self) -> VerifierIndex { + pub fn verifier_index(&self) -> VerifierIndex + where + VerifierIndex: Clone, + { if let Some(verifier_index) = &self.verifier_index { return verifier_index.clone(); } @@ -176,7 +185,7 @@ impl ProverIndex { .as_ref() .map(|cs| LookupVerifierIndex { joint_lookup_used: cs.configuration.lookup_info.features.joint_lookup_used, - lookup_info: cs.configuration.lookup_info.clone(), + lookup_info: cs.configuration.lookup_info, lookup_selectors: cs .lookup_selectors .as_ref() @@ -200,14 +209,11 @@ impl ProverIndex { VerifierIndex { domain, max_poly_size: self.max_poly_size, + zk_rows: self.cs.zk_rows, powers_of_alpha: self.powers_of_alpha.clone(), public: self.cs.public, prev_challenges: self.cs.prev_challenges, - srs: { - let cell = OnceCell::new(); - cell.set(Arc::clone(&self.srs)).unwrap(); - cell - }, + srs: Arc::clone(&self.srs), sigma_comm: array::from_fn(|i| { self.srs.commit_evaluations_non_hiding( @@ -233,21 +239,23 @@ impl ProverIndex { &self.column_evaluations.poseidon_selector8, )), - complete_add_comm: self.srs.commit_evaluations_non_hiding( + complete_add_comm: mask_fixed(self.srs.commit_evaluations_non_hiding( domain, &self.column_evaluations.complete_add_selector4, + )), + mul_comm: mask_fixed( + self.srs + .commit_evaluations_non_hiding(domain, &self.column_evaluations.mul_selector8), ), - mul_comm: self - .srs - .commit_evaluations_non_hiding(domain, &self.column_evaluations.mul_selector8), - emul_comm: self - .srs - .commit_evaluations_non_hiding(domain, &self.column_evaluations.emul_selector8), - - endomul_scalar_comm: self.srs.commit_evaluations_non_hiding( + emul_comm: mask_fixed( + self.srs + .commit_evaluations_non_hiding(domain, &self.column_evaluations.emul_selector8), + ), + + endomul_scalar_comm: mask_fixed(self.srs.commit_evaluations_non_hiding( domain, &self.column_evaluations.endomul_scalar_selector8, - ), + )), range_check0_comm: self .column_evaluations @@ -284,14 +292,20 @@ impl ProverIndex { .map(|eval8| self.srs.commit_evaluations_non_hiding(domain, eval8)), shift: self.cs.shift, - zkpm: { + permutation_vanishing_polynomial_m: { let cell = OnceCell::new(); - cell.set(self.cs.precomputations().zkpm.clone()).unwrap(); + cell.set( + self.cs + .precomputations() + .permutation_vanishing_polynomial_m + .clone(), + ) + .unwrap(); cell }, w: { let cell = OnceCell::new(); - cell.set(zk_w3(self.cs.domain.d1)).unwrap(); + cell.set(zk_w(self.cs.domain.d1, self.cs.zk_rows)).unwrap(); cell }, endo: self.cs.endo, @@ -301,27 +315,24 @@ impl ProverIndex { } } -impl VerifierIndex { +impl> VerifierIndex { /// Gets srs from [`VerifierIndex`] lazily - pub fn srs(&self) -> &Arc> + pub fn srs(&self) -> &Arc where G::BaseField: PrimeField, { - self.srs.get_or_init(|| { - let mut srs = SRS::::create(self.max_poly_size); - srs.add_lagrange_basis(self.domain); - Arc::new(srs) - }) + &self.srs } - /// Gets zkpm from [`VerifierIndex`] lazily - pub fn zkpm(&self) -> &DensePolynomial { - self.zkpm.get_or_init(|| zk_polynomial(self.domain)) + /// Gets permutation_vanishing_polynomial_m from [`VerifierIndex`] lazily + pub fn permutation_vanishing_polynomial_m(&self) -> &DensePolynomial { + self.permutation_vanishing_polynomial_m + .get_or_init(|| vanishes_on_last_n_rows(self.domain, self.zk_rows)) } /// Gets w from [`VerifierIndex`] lazily pub fn w(&self) -> &G::ScalarField { - self.w.get_or_init(|| zk_w3(self.domain)) + self.w.get_or_init(|| zk_w(self.domain, self.zk_rows)) } /// Deserializes a [`VerifierIndex`] from a file, given a pointer 
to an SRS and an optional offset in the file. @@ -330,12 +341,15 @@ impl VerifierIndex { /// /// Will give error if it fails to deserialize from file or unable to set `srs` in `verifier_index`. pub fn from_file( - srs: Option>>, + srs: Arc, path: &Path, offset: Option, // TODO: we shouldn't have to pass these endo: G::ScalarField, - ) -> Result { + ) -> Result + where + OpeningProof::SRS: Default, + { // open file let file = File::open(path).map_err(|e| e.to_string())?; @@ -350,13 +364,7 @@ impl VerifierIndex { .map_err(|e| e.to_string())?; // fill in the rest - if let Some(srs) = srs { - verifier_index - .srs - .set(srs) - .map_err(|_| VerifierIndexError::SRSHasBeenSet.to_string())?; - }; - + verifier_index.srs = srs; verifier_index.endo = endo; Ok(verifier_index) @@ -389,11 +397,12 @@ impl VerifierIndex { pub fn digest>( &self, ) -> G::BaseField { - let mut fq_sponge = EFqSponge::new(G::OtherCurve::sponge_params()); + let mut fq_sponge = EFqSponge::new(G::other_curve_sponge_params()); // We fully expand this to make the compiler check that we aren't missing any commitments let VerifierIndex { domain: _, max_poly_size: _, + zk_rows: _, srs: _, public: _, prev_challenges: _, @@ -420,7 +429,7 @@ impl VerifierIndex { lookup_index, shift: _, - zkpm: _, + permutation_vanishing_polynomial_m: _, w: _, endo: _, diff --git a/optimism/.gitignore b/optimism/.gitignore new file mode 100644 index 0000000000..53df36bb78 --- /dev/null +++ b/optimism/.gitignore @@ -0,0 +1 @@ +rpcs.sh diff --git a/optimism/Cargo.toml b/optimism/Cargo.toml new file mode 100644 index 0000000000..da634f3030 --- /dev/null +++ b/optimism/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "kimchi_optimism" +version = "0.1.0" +description = "MIPS demo" +repository = "https://github.com/o1-labs/proof-systems" +homepage = "https://o1-labs.github.io/proof-systems/" +documentation = "https://o1-labs.github.io/proof-systems/rustdoc/" +readme = "README.md" +edition = "2021" +license = "Apache-2.0" + +[lib] +path = "src/lib.rs" + +[dependencies] +ark-bn254 = { version = "0.3.0" } +kimchi = { path = "../kimchi", version = "0.1.0", features = [ "bn254" ] } +poly-commitment = { path = "../poly-commitment", version = "0.1.0" } +groupmap = { path = "../groupmap", version = "0.1.0" } +mina-curves = { path = "../curves", version = "0.1.0" } +mina-poseidon = { path = "../poseidon", version = "0.1.0" } +elf = "0.7.2" +rmp-serde = "1.1.1" +serde_json = "1.0.91" +serde = "1.0.130" +serde_with = "1.10.0" +ark-poly = { version = "0.3.0", features = [ "parallel" ] } +ark-ff = { version = "0.3.0", features = [ "parallel" ] } +clap = "4.4.6" +hex = "0.4.3" +regex = "1.10.2" +libflate = "2" +base64 = "0.21.5" diff --git a/optimism/README.md b/optimism/README.md new file mode 100644 index 0000000000..2dbf25c375 --- /dev/null +++ b/optimism/README.md @@ -0,0 +1,18 @@ +To run the demo: +* create an executable file `rpcs.sh` that looks like + ```bash + #!/usr/bin/env bash + export L1RPC=http://xxxxxxxxx + export L2RPC=http://xxxxxxxxx + ``` +* run the `run-code.sh` script. + +This will +* generate the initial state, +* execute the OP program, +* execute the OP program through the cannon MIPS VM, +* execute the OP program through the kimchi MIPS VM prover. + +The initial state will be output to a file with format `YYYY-MM-DD-HH-MM-SS-op-program-data-log.sh`. + +If you want to re-run against an existing state, pass the environment variable `FILENAME=YYYY-MM-DD-HH-MM-SS-op-program-data-log.sh` to the `run-code.sh` script. 
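For example, to re-run against a saved state produced by an earlier run, one would invoke `FILENAME=2023-10-18-12-00-00-op-program-data-log.sh ./run-code.sh` (the timestamp here is hypothetical).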
diff --git a/optimism/ethereum-optimism b/optimism/ethereum-optimism
new file mode 160000
index 0000000000..c83cd947d4
--- /dev/null
+++ b/optimism/ethereum-optimism
@@ -0,0 +1 @@
+Subproject commit c83cd947d419aa2c213583a32872bc350a69e566
diff --git a/optimism/generate-config.sh b/optimism/generate-config.sh
new file mode 100755
index 0000000000..3e8dae0221
--- /dev/null
+++ b/optimism/generate-config.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+source rpcs.sh
+
+# L2 output oracle on Goerli
+# L2_OUTPUT_ORACLE=0xE6Dfba0953616Bacab0c9A8ecb3a9BBa77FC15c0
+# L2 output oracle on Sepolia
+L2_OUTPUT_ORACLE=0x90E9c4f8a994a250F6aEfd61CAFb4F2e895D458F
+
+L2_FINALIZED_NUMBER=$(cast block finalized --rpc-url "${L2RPC}" -f number)
+echo "Finalized number: ${L2_FINALIZED_NUMBER}" 1>&2
+L2_FINALIZED_HASH=$(cast block "${L2_FINALIZED_NUMBER}" --rpc-url "${L2RPC}" -f hash)
+
+L1_FINALIZED_NUMBER=$(cast block finalized --rpc-url "${L1RPC}" -f number)
+L1_FINALIZED_HASH=$(cast block "${L1_FINALIZED_NUMBER}" --rpc-url "${L1RPC}" -f hash)
+
+OUTPUT_INDEX=$(cast call --rpc-url "${L1RPC}" "${L2_OUTPUT_ORACLE}" 'getL2OutputIndexAfter(uint256) returns(uint256)' "${L2_FINALIZED_NUMBER}")
+OUTPUT_INDEX=$((OUTPUT_INDEX-1))
+
+OUTPUT=$(cast call --rpc-url "${L1RPC}" "${L2_OUTPUT_ORACLE}" 'getL2Output(uint256) returns(bytes32,uint128,uint128)' "${OUTPUT_INDEX}")
+OUTPUT_ROOT=$(echo ${OUTPUT} | cut -d' ' -f 1)
+OUTPUT_TIMESTAMP=$(echo ${OUTPUT} | cut -d' ' -f 2)
+OUTPUT_L2BLOCK_NUMBER=$(echo ${OUTPUT} | cut -d' ' -f 3)
+
+L1_HEAD=$L1_FINALIZED_HASH
+L2_CLAIM=$OUTPUT_ROOT
+L2_BLOCK_NUMBER=$OUTPUT_L2BLOCK_NUMBER
+
+STARTING_L2BLOCK_NUMBER=$((L2_BLOCK_NUMBER-100))
+STARTING_OUTPUT_INDEX=$(cast call --rpc-url "${L1RPC}" "${L2_OUTPUT_ORACLE}" 'getL2OutputIndexAfter(uint256) returns(uint256)' "${STARTING_L2BLOCK_NUMBER}")
+STARTING_OUTPUT=$(cast call --rpc-url "${L1RPC}" "${L2_OUTPUT_ORACLE}" 'getL2Output(uint256) returns(bytes32,uint128,uint128)' "${STARTING_OUTPUT_INDEX}")
+STARTING_OUTPUT_ROOT=$(echo ${STARTING_OUTPUT} | cut -d' ' -f 1)
+L2_HEAD_NUMBER=$(echo ${STARTING_OUTPUT} | cut -d' ' -f 3)
+L2_HEAD=$(cast block "${L2_HEAD_NUMBER}" --rpc-url "${L2RPC}" -f hash)
+
+TODAY=$(date +"%Y-%m-%d-%H-%M-%S")
+FILENAME=${TODAY}-op-program-data-log.sh
+OP_PROGRAM_DATA_DIR=$(pwd)/op-program-db-sepolia-${TODAY}
+
+echo "export L1_HEAD=${L1_HEAD}" >> ${FILENAME}
+echo "export L2_HEAD=${L2_HEAD}" >> ${FILENAME}
+echo "export L2_BLOCK_NUMBER=${L2_BLOCK_NUMBER}" >> ${FILENAME}
+echo "export STARTING_OUTPUT_ROOT=${STARTING_OUTPUT_ROOT}" >> ${FILENAME}
+echo "export L2_CLAIM=${L2_CLAIM}" >> ${FILENAME}
+echo "export OP_PROGRAM_DATA_DIR=${OP_PROGRAM_DATA_DIR}" >> ${FILENAME}
+echo "export L1RPC=${L1RPC}" >> ${FILENAME}
+echo "export L2RPC=${L2RPC}" >> ${FILENAME}
+
+echo "${FILENAME}"
diff --git a/optimism/run-code.sh b/optimism/run-code.sh
new file mode 100755
index 0000000000..c664e58025
--- /dev/null
+++ b/optimism/run-code.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+source rpcs.sh
+
+set +u
+if [ -z "${FILENAME}" ]; then
+    FILENAME="$(./generate-config.sh)"
+fi
+set -u
+
+source $FILENAME
+
+./run-op-program.sh
+
+./run-vm.sh
diff --git a/optimism/run-op-program.sh b/optimism/run-op-program.sh
new file mode 100755
index 0000000000..c16072e8c9
--- /dev/null
+++ b/optimism/run-op-program.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+make -C ./ethereum-optimism/op-program op-program
+make -C ./ethereum-optimism/cannon cannon
+
+set -x
+./ethereum-optimism/op-program/bin/op-program \
+  --log.level
DEBUG \ + --l1 $L1RPC \ + --l2 $L2RPC \ + --network sepolia \ + --datadir ${OP_PROGRAM_DATA_DIR} \ + --l1.head $L1_HEAD \ + --l2.head $L2_HEAD \ + --l2.outputroot $STARTING_OUTPUT_ROOT \ + --l2.claim $L2_CLAIM \ + --l2.blocknumber $L2_BLOCK_NUMBER + +./ethereum-optimism/cannon/bin/cannon load-elf --path=./ethereum-optimism/op-program/bin/op-program-client.elf + +./ethereum-optimism/cannon/bin/cannon run \ + --pprof.cpu \ + --info-at '%10000000' \ + --proof-at never \ + --input ./state.json \ + -- \ + ./ethereum-optimism/op-program/bin/op-program \ + --log.level DEBUG \ + --l1 ${L1RPC} \ + --l2 ${L2RPC} \ + --network sepolia \ + --datadir ${OP_PROGRAM_DATA_DIR} \ + --l1.head ${L1_HEAD} \ + --l2.head ${L2_HEAD} \ + --l2.outputroot ${STARTING_OUTPUT_ROOT} \ + --l2.claim ${L2_CLAIM} \ + --l2.blocknumber ${L2_BLOCK_NUMBER} \ + --server diff --git a/optimism/run-vm.sh b/optimism/run-vm.sh new file mode 100755 index 0000000000..4f24506b4e --- /dev/null +++ b/optimism/run-vm.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +set -euo pipefail + +cargo run -p kimchi_optimism -- \ + --pprof-cpu \ + --info-at '%10000000' \ + --proof-at never \ + --input ./state.json \ + -- \ + ./ethereum-optimism/op-program/bin/op-program \ + --log.level DEBUG \ + --l1 ${L1RPC} \ + --l2 ${L2RPC} \ + --network sepolia \ + --datadir ${OP_PROGRAM_DATA_DIR} \ + --l1.head ${L1_HEAD} \ + --l2.head ${L2_HEAD} \ + --l2.outputroot ${STARTING_OUTPUT_ROOT} \ + --l2.claim ${L2_CLAIM} \ + --l2.blocknumber ${L2_BLOCK_NUMBER} \ + --server diff --git a/optimism/src/cannon.rs b/optimism/src/cannon.rs new file mode 100644 index 0000000000..6b0049c040 --- /dev/null +++ b/optimism/src/cannon.rs @@ -0,0 +1,137 @@ +// Data structure and stuff for compatibility with Cannon + +use base64::{engine::general_purpose, Engine as _}; +use libflate::zlib::Decoder; +use regex::Regex; +use serde::{Deserialize, Deserializer, Serialize}; +use std::io::Read; + +pub const PAGE_SIZE: usize = 4096; + +#[derive(Serialize, Deserialize, Debug)] +pub struct Page { + pub index: u32, + #[serde(deserialize_with = "from_base64")] + pub data: Vec, +} + +fn from_base64<'de, D>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let s: String = Deserialize::deserialize(deserializer)?; + let b64_decoded = general_purpose::STANDARD.decode(s).unwrap(); + let mut decoder = Decoder::new(&b64_decoded[..]).unwrap(); + let mut data = Vec::new(); + decoder.read_to_end(&mut data).unwrap(); + assert_eq!(data.len(), PAGE_SIZE); + Ok(data) +} + +// The renaming below keeps compatibility with OP Cannon's state format +#[derive(Serialize, Deserialize, Debug)] +pub struct State { + pub memory: Vec, + #[serde(rename = "preimageKey")] + pub preimage_key: String, + #[serde(rename = "preimageOffset")] + pub preimage_offset: u32, + pub pc: u32, + #[serde(rename = "nextPC")] + next_pc: u32, // + pub lo: u32, + pub hi: u32, + pub heap: u32, + exit: u8, + pub exited: bool, + pub step: u64, + pub registers: [u32; 32], + pub last_hint: Option>, +} + +#[derive(Clone, Debug, PartialEq)] +pub enum StepFrequency { + Never, + Always, + Exactly(u64), + Every(u64), +} + +// Simple parser for Cannon's "frequency format" +// A frequency input is either +// - never/always +// - = (only at step n) +// - % (every steps multiple of n) +pub fn step_frequency_parser(s: &str) -> std::result::Result { + use StepFrequency::*; + + let mod_re = Regex::new(r"%([0-9]+)").unwrap(); + let eq_re = Regex::new(r"=([0-9]+)").unwrap(); + + match s { + "never" => Ok(Never), + "always" => Ok(Always), + s => { 
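`from_base64` above expects each page to arrive as a zlib stream wrapped in base64 that inflates to exactly `PAGE_SIZE` bytes. A minimal sketch of the inverse direction, e.g. for producing test fixtures (`to_base64` is hypothetical and not part of this diff; it assumes the same module, so `PAGE_SIZE` is in scope):

```rust
use base64::{engine::general_purpose, Engine as _};
use libflate::zlib::Encoder;
use std::io::Write;

// Hypothetical inverse of `from_base64`: zlib-compress a page, then base64 it.
fn to_base64(data: &[u8]) -> String {
    assert_eq!(data.len(), PAGE_SIZE); // pages are fixed-size
    let mut encoder = Encoder::new(Vec::new()).unwrap();
    encoder.write_all(data).unwrap();
    let compressed = encoder.finish().into_result().unwrap();
    general_purpose::STANDARD.encode(compressed)
}
```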
+ if let Some(m) = mod_re.captures(s) { + Ok(Every(m[1].parse::().unwrap())) + } else if let Some(m) = eq_re.captures(s) { + Ok(Exactly(m[1].parse::().unwrap())) + } else { + Err(format!("Unknown frequency format {}", s)) + } + } + } +} + +#[cfg(test)] +mod tests { + + use super::*; + + #[test] + fn sp_parser() { + use StepFrequency::*; + assert_eq!(step_frequency_parser("never"), Ok(Never)); + assert_eq!(step_frequency_parser("always"), Ok(Always)); + assert_eq!(step_frequency_parser("=123"), Ok(Exactly(123))); + assert_eq!(step_frequency_parser("%123"), Ok(Every(123))); + assert!(step_frequency_parser("@123").is_err()); + } +} + +impl ToString for State { + // A very debatable and incomplete, but serviceable, `to_string` implementation. + fn to_string(&self) -> String { + format!( + "memory_size (length): {}\nfirst page size: {}\npreimage key: {}\npreimage offset:{}\npc: {}\nlo: {}\nhi: {}\nregisters:{:#?} ", + self.memory.len(), + self.memory[0].data.len(), + self.preimage_key, + self.preimage_offset, + self.pc, + self.lo, + self.hi, + self.registers + ) + } +} + +#[derive(Debug)] +pub struct HostProgram { + pub name: String, + pub arguments: Vec, +} + +#[derive(Debug)] +pub struct VmConfiguration { + pub input_state_file: String, + pub output_state_file: String, + pub metadata_file: String, + pub proof_at: StepFrequency, + pub stop_at: StepFrequency, + pub info_at: StepFrequency, + pub proof_fmt: String, + pub snapshot_fmt: String, + pub pprof_cpu: bool, + pub host: Option, +} diff --git a/optimism/src/lib.rs b/optimism/src/lib.rs new file mode 100644 index 0000000000..da4f2ad95d --- /dev/null +++ b/optimism/src/lib.rs @@ -0,0 +1,2 @@ +pub mod cannon; +pub mod mips; diff --git a/optimism/src/main.rs b/optimism/src/main.rs new file mode 100644 index 0000000000..732ceddb3d --- /dev/null +++ b/optimism/src/main.rs @@ -0,0 +1,143 @@ +use clap::{arg, value_parser, Arg, ArgAction, Command}; +use kimchi_optimism::{ + cannon::{State, VmConfiguration}, + mips::witness, +}; +use std::{fs::File, io::BufReader, process::ExitCode}; + +fn cli() -> VmConfiguration { + use kimchi_optimism::cannon::*; + + let app_name = "zkvm"; + let cli = Command::new(app_name) + .version("0.1") + .about("MIPS-based zkvm") + .arg(arg!(--input "initial state file").default_value("state.json")) + .arg(arg!(--output "output state file").default_value("out.json")) + .arg(arg!(--meta "metadata file").default_value("meta.json")) + // The CLI arguments below this line are ignored at this point + .arg( + Arg::new("proof-at") + .short('p') + .long("proof-at") + .value_name("FREQ") + .default_value("never") + .value_parser(step_frequency_parser), + ) + .arg( + Arg::new("proof-fmt") + .long("proof-fmt") + .value_name("FORMAT") + .default_value("proof-%d.json"), + ) + .arg( + Arg::new("snapshot-fmt") + .long("snapshot-fmt") + .value_name("FORMAT") + .default_value("state-%d.json"), + ) + .arg( + Arg::new("stop-at") + .long("stop-at") + .value_name("FREQ") + .default_value("never") + .value_parser(step_frequency_parser), + ) + .arg( + Arg::new("info-at") + .long("info-at") + .value_name("FREQ") + .default_value("never") + .value_parser(step_frequency_parser), + ) + .arg( + Arg::new("pprof-cpu") + .long("pprof-cpu") + .action(ArgAction::SetTrue), + ) + .arg( + arg!(host: [HOST] "host program specification [host program arguments]") + .num_args(1..) 
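The comments above pin down the semantics of each `StepFrequency` variant; a consumer of the parsed value might evaluate it against the step counter like this (a sketch: `should_fire` is hypothetical, only `StepFrequency` comes from the diff):

```rust
// "=n" fires only at step n; "%n" fires at every multiple of n.
fn should_fire(freq: &StepFrequency, step: u64) -> bool {
    match freq {
        StepFrequency::Never => false,
        StepFrequency::Always => true,
        StepFrequency::Exactly(n) => step == *n,
        StepFrequency::Every(n) => step % n == 0,
    }
}
```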
+ .last(true) + .value_parser(value_parser!(String)), + ); + + let cli = cli.get_matches(); + + let input_state_file = cli.get_one::("input").unwrap(); + + let output_state_file = cli.get_one::("output").unwrap(); + + let metadata_file = cli.get_one::("meta").unwrap(); + + let proof_at = cli.get_one::("proof-at").unwrap(); + let info_at = cli.get_one::("info-at").unwrap(); + let stop_at = cli.get_one::("stop-at").unwrap(); + + let proof_fmt = cli.get_one::("proof-fmt").unwrap(); + let snapshot_fmt = cli.get_one::("snapshot-fmt").unwrap(); + let pprof_cpu = cli.get_one::("pprof-cpu").unwrap(); + + let host_spec = cli + .get_many::("host") + .map(|vals| vals.collect::>()) + .unwrap_or_default(); + + let host = if host_spec.is_empty() { + None + } else { + Some(HostProgram { + name: host_spec[0].to_string(), + arguments: host_spec[1..] + .to_vec() + .iter() + .map(|x| x.to_string()) + .collect(), + }) + }; + + VmConfiguration { + input_state_file: input_state_file.to_string(), + output_state_file: output_state_file.to_string(), + metadata_file: metadata_file.to_string(), + proof_at: proof_at.clone(), + stop_at: stop_at.clone(), + info_at: info_at.clone(), + proof_fmt: proof_fmt.to_string(), + snapshot_fmt: snapshot_fmt.to_string(), + pprof_cpu: *pprof_cpu, + host, + } +} + +pub fn main() -> ExitCode { + let configuration = cli(); + + println!("configuration\n{:#?}", configuration); + + let file = File::open(configuration.input_state_file).expect("Error opening input state file "); + + let reader = BufReader::new(file); + // Read the JSON contents of the file as an instance of `State`. + let state: State = serde_json::from_reader(reader).expect("Error reading input state file"); + + if let Some(host_program) = configuration.host { + println!("Launching host program {}", host_program.name); + + let _child = std::process::Command::new(host_program.name) + .args(host_program.arguments) + .spawn() + .expect("Could not spawn host process"); + }; + + let page_size = 1 << 12; + + let mut env = witness::Env::::create(page_size, state); + + while !env.halt { + env.step(); + } + + // TODO: Logic + ExitCode::FAILURE +} diff --git a/optimism/src/mips/mod.rs b/optimism/src/mips/mod.rs new file mode 100644 index 0000000000..2499222bb4 --- /dev/null +++ b/optimism/src/mips/mod.rs @@ -0,0 +1,2 @@ +pub mod registers; +pub mod witness; diff --git a/optimism/src/mips/registers.rs b/optimism/src/mips/registers.rs new file mode 100644 index 0000000000..89a1ee23b8 --- /dev/null +++ b/optimism/src/mips/registers.rs @@ -0,0 +1,47 @@ +use serde::{Deserialize, Serialize}; +use std::ops::{Index, IndexMut}; + +pub const NUM_REGISTERS: usize = 34; + +#[derive(Clone, Default, Debug, Serialize, Deserialize)] +pub struct Registers { + pub general_purpose: [T; 32], + pub hi: T, + pub lo: T, +} + +impl Registers { + pub fn iter(&self) -> impl Iterator { + self.general_purpose.iter().chain([&self.hi, &self.lo]) + } +} + +impl Index for Registers { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + if index < 32 { + &self.general_purpose[index] + } else if index == 32 { + &self.hi + } else if index == 33 { + &self.lo + } else { + panic!("Index out of bounds"); + } + } +} + +impl IndexMut for Registers { + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + if index < 32 { + &mut self.general_purpose[index] + } else if index == 32 { + &mut self.hi + } else if index == 33 { + &mut self.lo + } else { + panic!("Index out of bounds"); + } + } +} diff --git a/optimism/src/mips/witness.rs 
b/optimism/src/mips/witness.rs new file mode 100644 index 0000000000..dbaa7b30a8 --- /dev/null +++ b/optimism/src/mips/witness.rs @@ -0,0 +1,99 @@ +use crate::{cannon::State, mips::registers::Registers}; +use ark_ff::Field; +use std::array; + +pub const NUM_GLOBAL_LOOKUP_TERMS: usize = 1; +pub const NUM_DECODING_LOOKUP_TERMS: usize = 2; +pub const NUM_INSTRUCTION_LOOKUP_TERMS: usize = 5; +pub const NUM_LOOKUP_TERMS: usize = + NUM_GLOBAL_LOOKUP_TERMS + NUM_DECODING_LOOKUP_TERMS + NUM_INSTRUCTION_LOOKUP_TERMS; +pub const SCRATCH_SIZE: usize = 25; + +#[derive(Clone)] +pub struct SyscallEnv { + pub heap: u32, // Heap pointer (actually unused in Cannon as of [2023-10-18]) + pub preimage_offset: u32, + pub preimage_key: Vec, + pub last_hint: Option>, +} + +impl SyscallEnv { + pub fn create(state: &State) -> Self { + SyscallEnv { + heap: state.heap, + preimage_key: state.preimage_key.as_bytes().to_vec(), // Might not be correct + preimage_offset: state.preimage_offset, + last_hint: state.last_hint.clone(), + } + } +} + +#[derive(Clone)] +pub struct Env { + pub instruction_counter: usize, + pub memory: Vec<(u32, Vec)>, + pub memory_write_index: Vec<(u32, Vec)>, + pub registers: Registers, + pub registers_write_index: Registers, + pub instruction_pointer: u32, + pub scratch_state_idx: usize, + pub scratch_state: [Fp; SCRATCH_SIZE], + pub halt: bool, + pub syscall_env: SyscallEnv, +} + +fn fresh_scratch_state() -> [Fp; N] { + array::from_fn(|_| Fp::zero()) +} + +impl Env { + pub fn create(page_size: usize, state: State) -> Self { + let initial_instruction_pointer = state.pc; + + let syscall_env = SyscallEnv::create(&state); + + let mut initial_memory: Vec<(u32, Vec)> = state + .memory + .into_iter() + // Check that the conversion from page data is correct + .map(|page| (page.index, page.data)) + .collect(); + + for (_address, initial_memory) in initial_memory.iter_mut() { + initial_memory.extend((0..(page_size - initial_memory.len())).map(|_| 0u8)); + assert_eq!(initial_memory.len(), page_size); + } + + let memory_offsets = initial_memory + .iter() + .map(|(offset, _)| *offset) + .collect::>(); + + let initial_registers = Registers { + lo: state.lo, + hi: state.hi, + general_purpose: state.registers, + }; + + Env { + instruction_counter: state.step as usize, + memory: initial_memory.clone(), + memory_write_index: memory_offsets + .iter() + .map(|offset| (*offset, vec![0usize; page_size])) + .collect(), + registers: initial_registers.clone(), + registers_write_index: Registers::default(), + instruction_pointer: initial_instruction_pointer, + scratch_state_idx: 0, + scratch_state: fresh_scratch_state(), + halt: state.exited, + syscall_env, + } + } + + pub fn step(&mut self) { + // TODO + self.halt = true; + } +} diff --git a/poly-commitment/Cargo.toml b/poly-commitment/Cargo.toml index 6f40ad5f7d..890555082e 100644 --- a/poly-commitment/Cargo.toml +++ b/poly-commitment/Cargo.toml @@ -37,6 +37,7 @@ ocaml-gen = { version = "0.1.5", optional = true } [dev-dependencies] colored = "2.0.0" rand_chacha = { version = "0.3.0" } +ark-bn254 = { version = "0.3.0" } [features] ocaml_types = [ "ocaml", "ocaml-gen" ] diff --git a/poly-commitment/src/commitment.rs b/poly-commitment/src/commitment.rs index 001e4b021e..3d6cdf2411 100644 --- a/poly-commitment/src/commitment.rs +++ b/poly-commitment/src/commitment.rs @@ -7,6 +7,7 @@ //! 3. 
Verify batch of batched opening proofs use crate::srs::endos; +use crate::SRS as SRSTrait; use crate::{error::CommitmentError, srs::SRS}; use ark_ec::{ models::short_weierstrass_jacobian::GroupAffine as SWJAffine, msm::VariableBaseMSM, @@ -357,7 +358,12 @@ pub trait CommitmentCurve: AffineCurve { fn to_coordinates(&self) -> Option<(Self::BaseField, Self::BaseField)>; fn of_coordinates(x: Self::BaseField, y: Self::BaseField) -> Self; +} +/// A trait extending CommitmentCurve for endomorphisms. +/// Unfortunately, we can't specify that `AffineCurve`, +/// so usage of this traits must manually bind `G::BaseField: PrimeField`. +pub trait EndoCurve: CommitmentCurve { /// Combine where x1 = one fn combine_one(g1: &[Self], g2: &[Self], x2: Self::ScalarField) -> Vec { crate::combine::window_combine(g1, g2, Self::ScalarField::one(), x2) @@ -384,10 +390,7 @@ pub trait CommitmentCurve: AffineCurve { } } -impl CommitmentCurve for SWJAffine

<P>
-where
-    P::BaseField: PrimeField,
-{
+impl<P: SWModelParameters> CommitmentCurve for SWJAffine<P> {
     type Params = P;
     type Map = BWParameters<P>;
@@ -402,7 +405,12 @@ where
     fn of_coordinates(x: P::BaseField, y: P::BaseField) -> SWJAffine<P> {
         SWJAffine::<P>::new(x, y, false)
     }
+}
+
+impl<P: SWModelParameters> EndoCurve for SWJAffine<P>

+where + P::BaseField: PrimeField, +{ fn combine_one(g1: &[Self], g2: &[Self], x2: Self::ScalarField) -> Vec { crate::combine::affine_window_combine_one(g1, g2, x2) } @@ -500,7 +508,7 @@ where /// Contains the batch evaluation // TODO: I think we should really change this name to something more correct -pub struct BatchEvaluationProof<'a, G, EFqSponge> +pub struct BatchEvaluationProof<'a, G, EFqSponge, OpeningProof> where G: AffineCurve, EFqSponge: FqSponge, @@ -514,23 +522,114 @@ where /// scaling factor for polynomials pub evalscale: G::ScalarField, /// batched opening proof - pub opening: &'a OpeningProof, + pub opening: &'a OpeningProof, pub combined_inner_product: G::ScalarField, } -impl SRS { +pub fn combine_commitments( + evaluations: &[Evaluation], + scalars: &mut Vec, + points: &mut Vec, + polyscale: G::ScalarField, + rand_base: G::ScalarField, +) { + let mut xi_i = G::ScalarField::one(); + + for Evaluation { + commitment, + degree_bound, + .. + } in evaluations + .iter() + .filter(|x| !x.commitment.unshifted.is_empty()) + { + // iterating over the polynomial segments + for comm_ch in &commitment.unshifted { + scalars.push(rand_base * xi_i); + points.push(*comm_ch); + + xi_i *= polyscale; + } + + if let Some(_m) = degree_bound { + if let Some(comm_ch) = commitment.shifted { + if !comm_ch.is_zero() { + // polyscale^i sum_j evalscale^j elm_j^{N - m} f(elm_j) + scalars.push(rand_base * xi_i); + points.push(comm_ch); + + xi_i *= polyscale; + } + } + } + } +} + +pub fn combine_evaluations( + evaluations: &Vec>, + polyscale: G::ScalarField, +) -> Vec { + let mut xi_i = G::ScalarField::one(); + let mut acc = { + let num_evals = if !evaluations.is_empty() { + evaluations[0].evaluations.len() + } else { + 0 + }; + vec![G::ScalarField::zero(); num_evals] + }; + + for Evaluation { + evaluations, + degree_bound, + .. + } in evaluations + .iter() + .filter(|x| !x.commitment.unshifted.is_empty()) + { + // iterating over the polynomial segments + for j in 0..evaluations[0].len() { + for i in 0..evaluations.len() { + acc[i] += evaluations[i][j] * xi_i; + } + xi_i *= polyscale; + } + + if let Some(_m) = degree_bound { + todo!("Misaligned chunked commitments are not supported") + } + } + + acc +} + +impl SRSTrait for SRS { + /// The maximum polynomial degree that can be committed to + fn max_poly_size(&self) -> usize { + self.g.len() + } + + fn get_lagrange_basis(&self, domain_size: usize) -> Option<&Vec>> { + self.lagrange_bases.get(&domain_size) + } + + fn blinding_commitment(&self) -> G { + self.h + } + /// Commits a polynomial, potentially splitting the result in multiple commitments. - pub fn commit( + fn commit( &self, plnm: &DensePolynomial, + num_chunks: usize, max: Option, rng: &mut (impl RngCore + CryptoRng), ) -> BlindedCommitment { - self.mask(self.commit_non_hiding(plnm, max), rng) + self.mask(self.commit_non_hiding(plnm, num_chunks, max), rng) } /// Turns a non-hiding polynomial commitment into a hidding polynomial commitment. Transforms each given `` into `( + wH, w)` with a random `w` per commitment. - pub fn mask( + fn mask( &self, comm: PolyComm, rng: &mut (impl RngCore + CryptoRng), @@ -540,7 +639,7 @@ impl SRS { } /// Same as [SRS::mask] except that you can pass the blinders manually. - pub fn mask_custom( + fn mask_custom( &self, com: PolyComm, blinders: &PolyComm, @@ -561,13 +660,15 @@ impl SRS { /// This function commits a polynomial using the SRS' basis of size `n`. 
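The new `num_chunks` argument makes the number of unshifted chunks explicit: the `for _ in unshifted.len()..num_chunks` loop further down pads short commitments with the zero point. A small check of that invariant (a sketch; the crate paths and the Vesta instantiation are assumptions, not part of the diff):

```rust
use ark_poly::{univariate::DensePolynomial, UVPolynomial};
use mina_curves::pasta::{Fp, Vesta};
use poly_commitment::{srs::SRS, SRS as _};

#[test]
fn num_chunks_pads_with_zero_points() {
    // A polynomial that fits in one chunk, committed with num_chunks = 2:
    // the second unshifted chunk is the zero point, per the padding loop.
    let srs = SRS::<Vesta>::create(16);
    let p = DensePolynomial::from_coefficients_slice(&[Fp::from(1u64), Fp::from(2u64)]);
    let comm = srs.commit_non_hiding(&p, 2, None);
    assert_eq!(comm.unshifted.len(), 2);
}
```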
/// - `plnm`: polynomial to commit to with max size of sections + /// - `num_chunks`: the number of unshifted commitments to be included in the output polynomial commitment /// - `max`: maximal degree of the polynomial (not inclusive), if none, no degree bound /// The function returns an unbounded commitment vector (which splits the commitment into several commitments of size at most `n`), /// as well as an optional bounded commitment (if `max` is set). /// Note that a maximum degree cannot (and doesn't need to) be enforced via a shift if `max` is a multiple of `n`. - pub fn commit_non_hiding( + fn commit_non_hiding( &self, plnm: &DensePolynomial, + num_chunks: usize, max: Option, ) -> PolyComm { let is_zero = plnm.is_zero(); @@ -588,6 +689,10 @@ impl SRS { }); } + for _ in unshifted.len()..num_chunks { + unshifted.push(G::zero()); + } + // committing only last chunk shifted to the right edge of SRS let shifted = match max { None => None, @@ -613,7 +718,7 @@ impl SRS { PolyComm:: { unshifted, shifted } } - pub fn commit_evaluations_non_hiding( + fn commit_evaluations_non_hiding( &self, domain: D, plnm: &Evaluations>, @@ -638,7 +743,7 @@ impl SRS { } } - pub fn commit_evaluations( + fn commit_evaluations( &self, domain: D, plnm: &Evaluations>, @@ -646,7 +751,9 @@ impl SRS { ) -> BlindedCommitment { self.mask(self.commit_evaluations_non_hiding(domain, plnm), rng) } +} +impl SRS { /// This function verifies batch of batched polynomial commitment opening proofs /// batch: batch of batched polynomial commitment opening proofs /// vector of evaluation points @@ -660,7 +767,7 @@ impl SRS { pub fn verify( &self, group_map: &G::Map, - batch: &mut [BatchEvaluationProof], + batch: &mut [BatchEvaluationProof>], rng: &mut RNG, ) -> bool where @@ -798,38 +905,13 @@ impl SRS { // sum_j evalscale^j (sum_i polyscale^i f_i) (elm_j) // == sum_j sum_i evalscale^j polyscale^i f_i(elm_j) // == sum_i polyscale^i sum_j evalscale^j f_i(elm_j) - { - let mut xi_i = G::ScalarField::one(); - - for Evaluation { - commitment, - degree_bound, - .. 
- } in evaluations - .iter() - .filter(|x| !x.commitment.unshifted.is_empty()) - { - // iterating over the polynomial segments - for comm_ch in &commitment.unshifted { - scalars.push(rand_base_i_c_i * xi_i); - points.push(*comm_ch); - - xi_i *= *polyscale; - } - - if let Some(_m) = degree_bound { - if let Some(comm_ch) = commitment.shifted { - if !comm_ch.is_zero() { - // polyscale^i sum_j evalscale^j elm_j^{N - m} f(elm_j) - scalars.push(rand_base_i_c_i * xi_i); - points.push(comm_ch); - - xi_i *= *polyscale; - } - } - } - } - }; + combine_commitments( + evaluations, + &mut scalars, + &mut points, + *polyscale, + rand_base_i_c_i, + ); scalars.push(rand_base_i_c_i * *combined_inner_product); points.push(u); @@ -879,12 +961,14 @@ mod tests { let mut srs = SRS::::create(n); srs.add_lagrange_basis(domain); + let num_chunks = domain.size() / srs.g.len(); + let expected_lagrange_commitments: Vec<_> = (0..n) .map(|i| { let mut e = vec![Fp::zero(); n]; e[i] = Fp::one(); let p = Evaluations::>::from_vec_and_domain(e, domain).interpolate(); - srs.commit_non_hiding(&p, None) + srs.commit_non_hiding(&p, num_chunks, None) }) .collect(); @@ -905,12 +989,14 @@ mod tests { let mut srs = SRS::::create(n / 2); srs.add_lagrange_basis(domain); + let num_chunks = domain.size() / srs.g.len(); + let expected_lagrange_commitments: Vec<_> = (0..n) .map(|i| { let mut e = vec![Fp::zero(); n]; e[i] = Fp::one(); let p = Evaluations::>::from_vec_and_domain(e, domain).interpolate(); - srs.commit_non_hiding(&p, None) + srs.commit_non_hiding(&p, num_chunks, None) }) .collect(); @@ -931,12 +1017,14 @@ mod tests { let mut srs = SRS::::create(n / 2 + 1); srs.add_lagrange_basis(domain); + let num_chunks = (domain.size() + srs.g.len() - 1) / srs.g.len(); + let expected_lagrange_commitments: Vec<_> = (0..n) .map(|i| { let mut e = vec![Fp::zero(); n]; e[i] = Fp::one(); let p = Evaluations::>::from_vec_and_domain(e, domain).interpolate(); - srs.commit_non_hiding(&p, Some(64)) + srs.commit_non_hiding(&p, num_chunks, Some(64)) }) .collect(); @@ -961,9 +1049,9 @@ mod tests { let rng = &mut StdRng::from_seed([0u8; 32]); // commit the two polynomials (and upperbound the second one) - let commitment = srs.commit(&poly1, None, rng); + let commitment = srs.commit(&poly1, 1, None, rng); let upperbound = poly2.degree() + 1; - let bounded_commitment = srs.commit(&poly2, Some(upperbound), rng); + let bounded_commitment = srs.commit(&poly2, 1, Some(upperbound), rng); // create an aggregated opening proof let (u, v) = (Fp::rand(rng), Fp::rand(rng)); @@ -994,10 +1082,10 @@ mod tests { // evaluate the polynomials at these two points let poly1_chunked_evals = vec![ poly1 - .to_chunked_polynomial(srs.g.len()) + .to_chunked_polynomial(1, srs.g.len()) .evaluate_chunks(elm[0]), poly1 - .to_chunked_polynomial(srs.g.len()) + .to_chunked_polynomial(1, srs.g.len()) .evaluate_chunks(elm[1]), ]; @@ -1010,10 +1098,10 @@ mod tests { let poly2_chunked_evals = vec![ poly2 - .to_chunked_polynomial(srs.g.len()) + .to_chunked_polynomial(1, srs.g.len()) .evaluate_chunks(elm[0]), poly2 - .to_chunked_polynomial(srs.g.len()) + .to_chunked_polynomial(1, srs.g.len()) .evaluate_chunks(elm[1]), ]; diff --git a/poly-commitment/src/evaluation_proof.rs b/poly-commitment/src/evaluation_proof.rs index 7c030787cc..0b15615b66 100644 --- a/poly-commitment/src/evaluation_proof.rs +++ b/poly-commitment/src/evaluation_proof.rs @@ -1,11 +1,11 @@ -use crate::srs::SRS; use crate::{commitment::*, srs::endos}; +use crate::{srs::SRS, PolynomialsToCombine, SRS as _}; use 
ark_ec::{msm::VariableBaseMSM, AffineCurve, ProjectiveCurve}; use ark_ff::{FftField, Field, One, PrimeField, UniformRand, Zero}; use ark_poly::{univariate::DensePolynomial, UVPolynomial}; use ark_poly::{EvaluationDomain, Evaluations}; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; -use o1_utils::math; +use o1_utils::{math, ExtendedDensePolynomial}; use rand_core::{CryptoRng, RngCore}; use rayon::prelude::*; use serde::{Deserialize, Serialize}; @@ -71,6 +71,103 @@ impl<'a, F: Field> ScaledChunkedPolynomial { } } +/// Combine the polynomials using `polyscale`, creating a single unified polynomial to open. +pub fn combine_polys>( + plnms: PolynomialsToCombine, // vector of polynomial with optional degree bound and commitment randomness + polyscale: G::ScalarField, // scaling factor for polynoms + srs_length: usize, +) -> (DensePolynomial, G::ScalarField) { + let mut plnm = ScaledChunkedPolynomial::::default(); + let mut plnm_evals_part = { + // For now just check that all the evaluation polynomials are the same degree so that we + // can do just a single FFT. + // Furthermore we check they have size less than the SRS size so we don't have to do chunking. + // If/when we change this, we can add more complicated code to handle different degrees. + let degree = plnms + .iter() + .fold(None, |acc, (p, _, _)| match p { + DensePolynomialOrEvaluations::DensePolynomial(_) => acc, + DensePolynomialOrEvaluations::Evaluations(_, d) => { + if let Some(n) = acc { + assert_eq!(n, d.size()); + } + Some(d.size()) + } + }) + .unwrap_or(0); + vec![G::ScalarField::zero(); degree] + }; + // let mut plnm_chunks: Vec<(G::ScalarField, OptShiftedPolynomial<_>)> = vec![]; + + let mut omega = G::ScalarField::zero(); + let mut scale = G::ScalarField::one(); + + // iterating over polynomials in the batch + for (p_i, degree_bound, omegas) in plnms { + match p_i { + DensePolynomialOrEvaluations::Evaluations(evals_i, sub_domain) => { + let stride = evals_i.evals.len() / sub_domain.size(); + let evals = &evals_i.evals; + plnm_evals_part + .par_iter_mut() + .enumerate() + .for_each(|(i, x)| { + *x += scale * evals[i * stride]; + }); + for j in 0..omegas.unshifted.len() { + omega += &(omegas.unshifted[j] * scale); + scale *= &polyscale; + } + // We assume here that we have no shifted segment. 
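Both branches of `combine_polys` consume one fresh power of `polyscale` per chunk, accumulating the blinders alongside. Informally, writing s for `polyscale` and p_i, ω_i for the i-th chunk and its blinding factor, the function returns

```latex
p(X) \;=\; \sum_i s^{\,c_i}\, p_i(X),
\qquad
\omega \;=\; \sum_i s^{\,c_i}\, \omega_i ,
```

with consecutive exponents c_i: each chunk of each polynomial in the batch takes the next power of s.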
+ // TODO: Remove shifted + } + + DensePolynomialOrEvaluations::DensePolynomial(p_i) => { + let mut offset = 0; + // iterating over chunks of the polynomial + if let Some(m) = degree_bound { + assert!(p_i.coeffs.len() <= m + 1); + } else { + assert!(omegas.shifted.is_none()); + } + for j in 0..omegas.unshifted.len() { + let segment = &p_i.coeffs[std::cmp::min(offset, p_i.coeffs.len()) + ..std::cmp::min(offset + srs_length, p_i.coeffs.len())]; + // always mixing in the unshifted segments + plnm.add_unshifted(scale, segment); + + omega += &(omegas.unshifted[j] * scale); + scale *= &polyscale; + offset += srs_length; + if let Some(m) = degree_bound { + if offset >= *m { + if offset > *m { + // mixing in the shifted segment since degree is bounded + plnm.add_shifted(scale, srs_length - m % srs_length, segment); + } + omega += &(omegas.shifted.unwrap() * scale); + scale *= &polyscale; + } + } + } + } + } + } + + let mut plnm = plnm.to_dense_polynomial(); + if !plnm_evals_part.is_empty() { + let n = plnm_evals_part.len(); + let max_poly_size = srs_length; + let num_chunks = n / max_poly_size; + plnm += &Evaluations::from_vec_and_domain(plnm_evals_part, D::new(n).unwrap()) + .interpolate() + .to_chunked_polynomial(num_chunks, max_poly_size) + .linearize(polyscale); + } + + (plnm, omega) +} + impl SRS { /// This function opens polynomial commitments in batch /// plnms: batch of polynomials to open commitments for with, optionally, max degrees @@ -101,6 +198,7 @@ impl SRS { EFqSponge: Clone + FqSponge, RNG: RngCore + CryptoRng, G::BaseField: PrimeField, + G: EndoCurve, { let (endo_q, endo_r) = endos::(); @@ -112,95 +210,7 @@ impl SRS { let mut g = self.g.clone(); g.extend(vec![G::zero(); padding]); - let (p, blinding_factor) = { - let mut plnm = ScaledChunkedPolynomial::::default(); - let mut plnm_evals_part = { - // For now just check that all the evaluation polynomials are the same degree so that we - // can do just a single FFT. - // Furthermore we check they have size less than the SRS size so we don't have to do chunking. - // If/when we change this, we can add more complicated code to handle different degrees. 
- let degree = plnms - .iter() - .fold(None, |acc, (p, _, _)| match p { - DensePolynomialOrEvaluations::DensePolynomial(_) => acc, - DensePolynomialOrEvaluations::Evaluations(_, d) => { - if let Some(n) = acc { - assert_eq!(n, d.size()); - } - Some(d.size()) - } - }) - .unwrap_or(0); - assert!(degree <= padded_length); - vec![G::ScalarField::zero(); degree] - }; - // let mut plnm_chunks: Vec<(G::ScalarField, OptShiftedPolynomial<_>)> = vec![]; - - let mut omega = G::ScalarField::zero(); - let mut scale = G::ScalarField::one(); - - // iterating over polynomials in the batch - for (p_i, degree_bound, omegas) in plnms { - match p_i { - DensePolynomialOrEvaluations::Evaluations(evals_i, sub_domain) => { - let stride = evals_i.evals.len() / sub_domain.size(); - let evals = &evals_i.evals; - plnm_evals_part - .par_iter_mut() - .enumerate() - .for_each(|(i, x)| { - *x += scale * evals[i * stride]; - }); - assert_eq!(omegas.unshifted.len(), 1); - omega += &(omegas.unshifted[0] * scale); - scale *= &polyscale; - } - - DensePolynomialOrEvaluations::DensePolynomial(p_i) => { - let mut offset = 0; - // iterating over chunks of the polynomial - if let Some(m) = degree_bound { - assert!(p_i.coeffs.len() <= m + 1); - } else { - assert!(omegas.shifted.is_none()); - } - for j in 0..omegas.unshifted.len() { - let segment = &p_i.coeffs - [offset..std::cmp::min(offset + self.g.len(), p_i.coeffs.len())]; - // always mixing in the unshifted segments - plnm.add_unshifted(scale, segment); - - omega += &(omegas.unshifted[j] * scale); - scale *= &polyscale; - offset += self.g.len(); - if let Some(m) = degree_bound { - if offset >= *m { - if offset > *m { - // mixing in the shifted segment since degree is bounded - plnm.add_shifted( - scale, - self.g.len() - m % self.g.len(), - segment, - ); - } - omega += &(omegas.shifted.unwrap() * scale); - scale *= &polyscale; - } - } - } - } - } - } - - let mut plnm = plnm.to_dense_polynomial(); - if !plnm_evals_part.is_empty() { - let n = plnm_evals_part.len(); - plnm += &Evaluations::from_vec_and_domain(plnm_evals_part, D::new(n).unwrap()) - .interpolate(); - } - - (plnm, omega) - }; + let (p, blinding_factor) = combine_polys::(plnms, polyscale, self.g.len()); let rounds = math::ceil_log2(self.g.len()); @@ -345,10 +355,59 @@ impl SRS { sg: g0, } } + + /// This function is a debugging helper. 
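A hypothetical use of this helper in a test, cross-checking the prover's polynomials against the evaluations the verifier will see (`plnms`, `elm` and `expected_evals` are placeholders):

```rust
// Recompute the verifier-side view of the openings from the prover's data.
let verifier_view = srs.prover_polynomials_to_verifier_evaluations(&plnms, &elm);
assert_eq!(verifier_view[0].evaluations, expected_evals);
```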
+ #[allow(clippy::too_many_arguments)] + #[allow(clippy::type_complexity)] + #[allow(clippy::many_single_char_names)] + pub fn prover_polynomials_to_verifier_evaluations>( + &self, + plnms: &[( + DensePolynomialOrEvaluations, + Option, + PolyComm, + )], // vector of polynomial with optional degree bound and commitment randomness + elm: &[G::ScalarField], // vector of evaluation points + ) -> Vec> + where + G::BaseField: PrimeField, + { + plnms + .iter() + .enumerate() + .map(|(i, (poly_or_evals, degree_bound, blinders))| { + let poly = match poly_or_evals { + DensePolynomialOrEvaluations::DensePolynomial(poly) => (*poly).clone(), + DensePolynomialOrEvaluations::Evaluations(evals, _) => { + (*evals).clone().interpolate() + } + }; + let chunked_polynomial = + poly.to_chunked_polynomial(blinders.unshifted.len(), self.g.len()); + let chunked_commitment = + { self.commit_non_hiding(&poly, blinders.unshifted.len(), None) }; + let masked_commitment = match self.mask_custom(chunked_commitment, blinders) { + Ok(comm) => comm, + Err(err) => panic!("Error at index {i}: {err}"), + }; + let chunked_evals = elm + .iter() + .map(|elm| chunked_polynomial.evaluate_chunks(*elm)) + .collect(); + Evaluation { + commitment: masked_commitment.commitment, + + evaluations: chunked_evals, + + degree_bound: *degree_bound, + } + }) + .collect() + } } #[serde_as] -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, Default)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] pub struct OpeningProof { /// vector of rounds of L & R commitments @@ -364,6 +423,49 @@ pub struct OpeningProof { pub sg: G, } +impl< + BaseField: PrimeField, + G: AffineCurve + CommitmentCurve + EndoCurve, + > crate::OpenProof for OpeningProof +{ + type SRS = SRS; + + fn open::ScalarField>>( + srs: &Self::SRS, + group_map: &::Map, + plnms: &[( + DensePolynomialOrEvaluations<::ScalarField, D>, + Option, + PolyComm<::ScalarField>, + )], // vector of polynomial with optional degree bound and commitment randomness + elm: &[::ScalarField], // vector of evaluation points + polyscale: ::ScalarField, // scaling factor for polynoms + evalscale: ::ScalarField, // scaling factor for evaluation point powers + sponge: EFqSponge, // sponge + rng: &mut RNG, + ) -> Self + where + EFqSponge: + Clone + FqSponge<::BaseField, G, ::ScalarField>, + RNG: RngCore + CryptoRng, + { + srs.open(group_map, plnms, elm, polyscale, evalscale, sponge, rng) + } + + fn verify( + srs: &Self::SRS, + group_map: &G::Map, + batch: &mut [BatchEvaluationProof], + rng: &mut RNG, + ) -> bool + where + EFqSponge: FqSponge, + RNG: RngCore + CryptoRng, + { + srs.verify(group_map, batch, rng) + } +} + pub struct Challenges { pub chal: Vec, pub chal_inv: Vec, diff --git a/poly-commitment/src/lib.rs b/poly-commitment/src/lib.rs index 3b6cd8b3e6..4d7bac7913 100644 --- a/poly-commitment/src/lib.rs +++ b/poly-commitment/src/lib.rs @@ -3,9 +3,121 @@ mod combine; pub mod commitment; pub mod error; pub mod evaluation_proof; +pub mod pairing_proof; pub mod srs; #[cfg(test)] mod tests; pub use commitment::PolyComm; + +use crate::commitment::{BatchEvaluationProof, BlindedCommitment, CommitmentCurve}; +use crate::error::CommitmentError; +use crate::evaluation_proof::DensePolynomialOrEvaluations; +use ark_ec::AffineCurve; +use ark_ff::UniformRand; +use ark_poly::{ + univariate::DensePolynomial, EvaluationDomain, Evaluations, Radix2EvaluationDomain as D, +}; +use mina_poseidon::FqSponge; +use rand_core::{CryptoRng, 
RngCore}; + +pub trait SRS { + /// The maximum polynomial degree that can be committed to + fn max_poly_size(&self) -> usize; + + /// Retrieve the precomputed Lagrange basis for the given domain size + fn get_lagrange_basis(&self, domain_size: usize) -> Option<&Vec>>; + + /// Get the group element used for blinding commitments + fn blinding_commitment(&self) -> G; + + /// Commits a polynomial, potentially splitting the result in multiple commitments. + fn commit( + &self, + plnm: &DensePolynomial, + num_chunks: usize, + max: Option, + rng: &mut (impl RngCore + CryptoRng), + ) -> BlindedCommitment; + + /// Same as [SRS::mask] except that you can pass the blinders manually. + fn mask_custom( + &self, + com: PolyComm, + blinders: &PolyComm, + ) -> Result, CommitmentError>; + + /// Turns a non-hiding polynomial commitment into a hidding polynomial commitment. Transforms each given `` into `( + wH, w)` with a random `w` per commitment. + fn mask( + &self, + comm: PolyComm, + rng: &mut (impl RngCore + CryptoRng), + ) -> BlindedCommitment { + let blinders = comm.map(|_| G::ScalarField::rand(rng)); + self.mask_custom(comm, &blinders).unwrap() + } + + /// This function commits a polynomial using the SRS' basis of size `n`. + /// - `plnm`: polynomial to commit to with max size of sections + /// - `max`: maximal degree of the polynomial (not inclusive), if none, no degree bound + /// The function returns an unbounded commitment vector (which splits the commitment into several commitments of size at most `n`), + /// as well as an optional bounded commitment (if `max` is set). + /// Note that a maximum degree cannot (and doesn't need to) be enforced via a shift if `max` is a multiple of `n`. + fn commit_non_hiding( + &self, + plnm: &DensePolynomial, + num_chunks: usize, + max: Option, + ) -> PolyComm; + + fn commit_evaluations_non_hiding( + &self, + domain: D, + plnm: &Evaluations>, + ) -> PolyComm; + + fn commit_evaluations( + &self, + domain: D, + plnm: &Evaluations>, + rng: &mut (impl RngCore + CryptoRng), + ) -> BlindedCommitment; +} + +#[allow(type_alias_bounds)] +type PolynomialsToCombine<'a, G: CommitmentCurve, D: EvaluationDomain> = &'a [( + DensePolynomialOrEvaluations<'a, G::ScalarField, D>, + Option, + PolyComm, +)]; + +pub trait OpenProof: Sized { + type SRS: SRS; + + #[allow(clippy::too_many_arguments)] + fn open::ScalarField>>( + srs: &Self::SRS, + group_map: &::Map, + plnms: PolynomialsToCombine, // vector of polynomial with optional degree bound and commitment randomness + elm: &[::ScalarField], // vector of evaluation points + polyscale: ::ScalarField, // scaling factor for polynoms + evalscale: ::ScalarField, // scaling factor for evaluation point powers + sponge: EFqSponge, // sponge + rng: &mut RNG, + ) -> Self + where + EFqSponge: + Clone + FqSponge<::BaseField, G, ::ScalarField>, + RNG: RngCore + CryptoRng; + + fn verify( + srs: &Self::SRS, + group_map: &G::Map, + batch: &mut [BatchEvaluationProof], + rng: &mut RNG, + ) -> bool + where + EFqSponge: FqSponge, + RNG: RngCore + CryptoRng; +} diff --git a/poly-commitment/src/pairing_proof.rs b/poly-commitment/src/pairing_proof.rs new file mode 100644 index 0000000000..913cf15d0f --- /dev/null +++ b/poly-commitment/src/pairing_proof.rs @@ -0,0 +1,430 @@ +use crate::commitment::*; +use crate::evaluation_proof::{combine_polys, DensePolynomialOrEvaluations}; +use crate::srs::SRS; +use crate::{CommitmentError, PolynomialsToCombine, SRS as SRSTrait}; +use ark_ec::{msm::VariableBaseMSM, AffineCurve, PairingEngine}; +use ark_ff::{PrimeField, 
Zero}; +use ark_poly::{ + univariate::{DenseOrSparsePolynomial, DensePolynomial}, + EvaluationDomain, Evaluations, Polynomial, Radix2EvaluationDomain as D, UVPolynomial, +}; +use mina_poseidon::FqSponge; +use rand_core::{CryptoRng, RngCore}; +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; + +#[serde_as] +#[derive(Debug, Serialize, Deserialize)] +#[serde( + bound = "Pair::G1Affine: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize" +)] +pub struct PairingProof { + #[serde_as(as = "o1_utils::serialization::SerdeAs")] + pub quotient: Pair::G1Affine, + #[serde_as(as = "o1_utils::serialization::SerdeAs")] + pub blinding: ::ScalarField, +} + +impl Default for PairingProof { + fn default() -> Self { + Self { + quotient: Pair::G1Affine::prime_subgroup_generator(), + blinding: ::ScalarField::zero(), + } + } +} + +impl Clone for PairingProof { + fn clone(&self) -> Self { + Self { + quotient: self.quotient, + blinding: self.blinding, + } + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct PairingSRS { + pub full_srs: SRS, + pub verifier_srs: SRS, +} + +impl Default for PairingSRS { + fn default() -> Self { + Self { + full_srs: SRS::default(), + verifier_srs: SRS::default(), + } + } +} + +impl Clone for PairingSRS { + fn clone(&self) -> Self { + Self { + full_srs: self.full_srs.clone(), + verifier_srs: self.verifier_srs.clone(), + } + } +} + +impl< + F: PrimeField, + G: CommitmentCurve, + G2: CommitmentCurve, + Pair: PairingEngine, + > PairingSRS +{ + pub fn create(x: F, n: usize) -> Self { + PairingSRS { + full_srs: SRS::create_trusted_setup(x, n), + verifier_srs: SRS::create_trusted_setup(x, 3), + } + } +} + +impl< + F: PrimeField, + G: CommitmentCurve, + G2: CommitmentCurve, + Pair: PairingEngine, + > crate::OpenProof for PairingProof +{ + type SRS = PairingSRS; + + fn open::ScalarField>>( + srs: &Self::SRS, + _group_map: &::Map, + plnms: &[( + DensePolynomialOrEvaluations<::ScalarField, D>, + Option, + PolyComm<::ScalarField>, + )], // vector of polynomial with optional degree bound and commitment randomness + elm: &[::ScalarField], // vector of evaluation points + polyscale: ::ScalarField, // scaling factor for polynoms + _evalscale: ::ScalarField, // scaling factor for evaluation point powers + _sponge: EFqSponge, // sponge + _rng: &mut RNG, + ) -> Self + where + EFqSponge: + Clone + FqSponge<::BaseField, G, ::ScalarField>, + RNG: RngCore + CryptoRng, + { + PairingProof::create(srs, plnms, elm, polyscale).unwrap() + } + + fn verify( + srs: &Self::SRS, + _group_map: &G::Map, + batch: &mut [BatchEvaluationProof], + _rng: &mut RNG, + ) -> bool + where + EFqSponge: FqSponge, + RNG: RngCore + CryptoRng, + { + for BatchEvaluationProof { + sponge: _, + evaluations, + evaluation_points, + polyscale, + evalscale: _, + opening, + combined_inner_product: _, + } in batch.iter() + { + if !opening.verify(srs, evaluations, *polyscale, evaluation_points) { + return false; + } + } + true + } +} + +impl< + F: PrimeField, + G: CommitmentCurve, + G2: CommitmentCurve, + Pair: PairingEngine, + > SRSTrait for PairingSRS +{ + fn max_poly_size(&self) -> usize { + self.full_srs.max_poly_size() + } + + fn get_lagrange_basis(&self, domain_size: usize) -> Option<&Vec>> { + self.full_srs.get_lagrange_basis(domain_size) + } + + fn blinding_commitment(&self) -> G { + self.full_srs.blinding_commitment() + } + + fn commit( + &self, + plnm: &DensePolynomial, + num_chunks: usize, + max: Option, + rng: &mut (impl RngCore + CryptoRng), + ) -> BlindedCommitment { + 
self.full_srs.commit(plnm, num_chunks, max, rng) + } + + fn mask_custom( + &self, + com: PolyComm, + blinders: &PolyComm, + ) -> Result, CommitmentError> { + self.full_srs.mask_custom(com, blinders) + } + + fn mask( + &self, + comm: PolyComm, + rng: &mut (impl RngCore + CryptoRng), + ) -> BlindedCommitment { + self.full_srs.mask(comm, rng) + } + + fn commit_non_hiding( + &self, + plnm: &DensePolynomial, + num_chunks: usize, + max: Option, + ) -> PolyComm { + self.full_srs.commit_non_hiding(plnm, num_chunks, max) + } + + fn commit_evaluations_non_hiding( + &self, + domain: D, + plnm: &Evaluations>, + ) -> PolyComm { + self.full_srs.commit_evaluations_non_hiding(domain, plnm) + } + + fn commit_evaluations( + &self, + domain: D, + plnm: &Evaluations>, + rng: &mut (impl RngCore + CryptoRng), + ) -> BlindedCommitment { + self.full_srs.commit_evaluations(domain, plnm, rng) + } +} + +/// The polynomial that evaluates to each of `evals` for the respective `elm`s. +fn eval_polynomial(elm: &[F], evals: &[F]) -> DensePolynomial { + assert_eq!(elm.len(), evals.len()); + let (zeta, zeta_omega) = if elm.len() == 2 { + (elm[0], elm[1]) + } else { + todo!() + }; + let (eval_zeta, eval_zeta_omega) = if evals.len() == 2 { + (evals[0], evals[1]) + } else { + todo!() + }; + + // The polynomial that evaluates to `p(zeta)` at `zeta` and `p(zeta_omega)` at + // `zeta_omega`. + // We write `p(x) = a + bx`, which gives + // ```text + // p(zeta) = a + b * zeta + // p(zeta_omega) = a + b * zeta_omega + // ``` + // and so + // ```text + // b = (p(zeta_omega) - p(zeta)) / (zeta_omega - zeta) + // a = p(zeta) - b * zeta + // ``` + let b = (eval_zeta_omega - eval_zeta) / (zeta_omega - zeta); + let a = eval_zeta - b * zeta; + DensePolynomial::from_coefficients_slice(&[a, b]) +} + +/// The polynomial that evaluates to `0` at the evaluation points. 
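`PairingProof::verify` further down reduces to the familiar KZG-style pairing check. With C the combined commitment, C_eval the commitment to `eval_polynomial`, b the blinding over base H, q the quotient and Z the divisor polynomial vanishing on the evaluation points, it accepts iff (a transcription of the code below):

```latex
e\bigl(C - C_{\mathrm{eval}} - b\,H,\; [1]_2\bigr)
\;=\;
e\bigl([q]_1,\; [Z(\tau)]_2\bigr)
```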
+fn divisor_polynomial(elm: &[F]) -> DensePolynomial { + elm.iter() + .map(|value| DensePolynomial::from_coefficients_slice(&[-(*value), F::one()])) + .reduce(|poly1, poly2| &poly1 * &poly2) + .unwrap() +} + +impl< + F: PrimeField, + G: CommitmentCurve, + G2: CommitmentCurve, + Pair: PairingEngine, + > PairingProof +{ + pub fn create>( + srs: &PairingSRS, + plnms: PolynomialsToCombine, // vector of polynomial with optional degree bound and commitment randomness + elm: &[G::ScalarField], // vector of evaluation points + polyscale: G::ScalarField, // scaling factor for polynoms + ) -> Option { + let (p, blinding_factor) = combine_polys::(plnms, polyscale, srs.full_srs.g.len()); + let evals: Vec<_> = elm.iter().map(|pt| p.evaluate(pt)).collect(); + + let quotient_poly = { + let eval_polynomial = eval_polynomial(elm, &evals); + let divisor_polynomial = divisor_polynomial(elm); + let numerator_polynomial = &p - &eval_polynomial; + let (quotient, remainder) = DenseOrSparsePolynomial::divide_with_q_and_r( + &numerator_polynomial.into(), + &divisor_polynomial.into(), + )?; + if !remainder.is_zero() { + return None; + } + quotient + }; + + let quotient = srs + .full_srs + .commit_non_hiding("ient_poly, 1, None) + .unshifted[0]; + + Some(PairingProof { + quotient, + blinding: blinding_factor, + }) + } + pub fn verify( + &self, + srs: &PairingSRS, // SRS + evaluations: &Vec>, // commitments to the polynomials + polyscale: G::ScalarField, // scaling factor for polynoms + elm: &[G::ScalarField], // vector of evaluation points + ) -> bool { + let poly_commitment = { + let mut scalars: Vec = Vec::new(); + let mut points = Vec::new(); + combine_commitments( + evaluations, + &mut scalars, + &mut points, + polyscale, + F::one(), /* TODO: This is inefficient */ + ); + let scalars: Vec<_> = scalars.iter().map(|x| x.into_repr()).collect(); + + VariableBaseMSM::multi_scalar_mul(&points, &scalars) + }; + let evals = combine_evaluations(evaluations, polyscale); + let blinding_commitment = srs.full_srs.h.mul(self.blinding); + let divisor_commitment = srs + .verifier_srs + .commit_non_hiding(&divisor_polynomial(elm), 1, None) + .unshifted[0]; + let eval_commitment = srs + .full_srs + .commit_non_hiding(&eval_polynomial(elm, &evals), 1, None) + .unshifted[0] + .into_projective(); + let numerator_commitment = { poly_commitment - eval_commitment - blinding_commitment }; + + let numerator = Pair::pairing( + numerator_commitment, + Pair::G2Affine::prime_subgroup_generator(), + ); + let scaled_quotient = Pair::pairing(self.quotient, divisor_commitment); + numerator == scaled_quotient + } +} + +#[cfg(test)] +mod tests { + use super::{PairingProof, PairingSRS}; + use crate::commitment::Evaluation; + use crate::evaluation_proof::DensePolynomialOrEvaluations; + use crate::srs::SRS; + use crate::SRS as _; + use ark_bn254::Fr as ScalarField; + use ark_bn254::{G1Affine as G1, G2Affine as G2, Parameters}; + use ark_ec::bn::Bn; + use ark_ff::UniformRand; + use ark_poly::{ + univariate::DensePolynomial, EvaluationDomain, Polynomial, Radix2EvaluationDomain as D, + UVPolynomial, + }; + + use rand::{rngs::StdRng, SeedableRng}; + + #[test] + fn test_pairing_proof() { + let n = 64; + let domain = D::::new(n).unwrap(); + + let rng = &mut StdRng::from_seed([0u8; 32]); + + let x = ScalarField::rand(rng); + + let mut srs = SRS::::create_trusted_setup(x, n); + let verifier_srs = SRS::::create_trusted_setup(x, 3); + srs.add_lagrange_basis(domain); + + let srs = PairingSRS { + full_srs: srs, + verifier_srs, + }; + + let polynomials: Vec<_> = 
(0..4) + .map(|_| { + let coeffs = (0..63).map(|_| ScalarField::rand(rng)).collect(); + DensePolynomial::from_coefficients_vec(coeffs) + }) + .collect(); + + let comms: Vec<_> = polynomials + .iter() + .map(|p| srs.full_srs.commit(p, 1, None, rng)) + .collect(); + + let polynomials_and_blinders: Vec<(DensePolynomialOrEvaluations<_, D<_>>, _, _)> = + polynomials + .iter() + .zip(comms.iter()) + .map(|(p, comm)| { + let p = DensePolynomialOrEvaluations::DensePolynomial(p); + (p, None, comm.blinders.clone()) + }) + .collect(); + + let evaluation_points = vec![ScalarField::rand(rng), ScalarField::rand(rng)]; + + let evaluations: Vec<_> = polynomials + .iter() + .zip(comms) + .map(|(p, commitment)| { + let evaluations = evaluation_points + .iter() + .map(|x| { + // Inputs are chosen to use only 1 chunk + vec![p.evaluate(x)] + }) + .collect(); + Evaluation { + commitment: commitment.commitment, + evaluations, + degree_bound: None, + } + }) + .collect(); + + let polyscale = ScalarField::rand(rng); + + let pairing_proof = PairingProof::>::create( + &srs, + polynomials_and_blinders.as_slice(), + &evaluation_points, + polyscale, + ) + .unwrap(); + + let res = pairing_proof.verify(&srs, &evaluations, polyscale, &evaluation_points); + assert!(res); + } +} diff --git a/poly-commitment/src/srs.rs b/poly-commitment/src/srs.rs index f4ae235871..c4cd29251b 100644 --- a/poly-commitment/src/srs.rs +++ b/poly-commitment/src/srs.rs @@ -3,8 +3,9 @@ use crate::commitment::CommitmentCurve; use crate::PolyComm; use ark_ec::{AffineCurve, ProjectiveCurve}; -use ark_ff::{BigInteger, PrimeField, Zero}; +use ark_ff::{BigInteger, Field, One, PrimeField, Zero}; use ark_poly::{EvaluationDomain, Radix2EvaluationDomain as D}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use blake2::{Blake2b512, Digest}; use groupmap::GroupMap; use serde::{Deserialize, Serialize}; @@ -15,7 +16,8 @@ use std::collections::HashMap; #[serde_as] #[derive(Debug, Clone, Default, Serialize, Deserialize, Eq)] -pub struct SRS { +#[serde(bound = "G: CanonicalDeserialize + CanonicalSerialize")] +pub struct SRS { /// The vector of group elements for committing to polynomials in coefficient form #[serde_as(as = "Vec")] pub g: Vec, @@ -31,7 +33,7 @@ pub struct SRS { impl PartialEq for SRS where - G: CommitmentCurve, + G: PartialEq, { fn eq(&self, other: &Self) -> bool { self.g == other.g && self.h == other.h @@ -59,27 +61,35 @@ where fn point_of_random_bytes(map: &G::Map, random_bytes: &[u8]) -> G where - G::BaseField: PrimeField, + G::BaseField: Field, { // packing in bit-representation const N: usize = 31; - let mut bits = [false; 8 * N]; - for i in 0..N { - for j in 0..8 { - bits[8 * i + j] = (random_bytes[i] >> j) & 1 == 1; + let extension_degree = G::BaseField::extension_degree() as usize; + + let mut base_fields = Vec::with_capacity(N * extension_degree); + + for base_count in 0..extension_degree { + let mut bits = [false; 8 * N]; + let offset = base_count * N; + for i in 0..N { + for j in 0..8 { + bits[8 * i + j] = (random_bytes[offset + i] >> j) & 1 == 1; + } } - } - let n = ::BigInt::from_bits_be(&bits); - let t = G::BaseField::from_repr(n).expect("packing code has a bug"); + let n = + <::BasePrimeField as PrimeField>::BigInt::from_bits_be(&bits); + let t = <::BasePrimeField as PrimeField>::from_repr(n) + .expect("packing code has a bug"); + base_fields.push(t) + } + let t = G::BaseField::from_base_prime_field_elems(&base_fields).unwrap(); let (x, y) = map.to_group(t); G::of_coordinates(x, y) } -impl SRS -where - G::BaseField: 
diff --git a/poly-commitment/src/srs.rs b/poly-commitment/src/srs.rs
index f4ae235871..c4cd29251b 100644
--- a/poly-commitment/src/srs.rs
+++ b/poly-commitment/src/srs.rs
@@ -3,8 +3,9 @@
 use crate::commitment::CommitmentCurve;
 use crate::PolyComm;
 use ark_ec::{AffineCurve, ProjectiveCurve};
-use ark_ff::{BigInteger, PrimeField, Zero};
+use ark_ff::{BigInteger, Field, One, PrimeField, Zero};
 use ark_poly::{EvaluationDomain, Radix2EvaluationDomain as D};
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
 use blake2::{Blake2b512, Digest};
 use groupmap::GroupMap;
 use serde::{Deserialize, Serialize};
@@ -15,7 +16,8 @@ use std::collections::HashMap;
 
 #[serde_as]
 #[derive(Debug, Clone, Default, Serialize, Deserialize, Eq)]
-pub struct SRS<G: CommitmentCurve> {
+#[serde(bound = "G: CanonicalDeserialize + CanonicalSerialize")]
+pub struct SRS<G> {
     /// The vector of group elements for committing to polynomials in coefficient form
     #[serde_as(as = "Vec<o1_utils::serialization::SerdeAs>")]
     pub g: Vec<G>,
@@ -31,7 +33,7 @@ pub struct SRS<G> {
 
 impl<G> PartialEq for SRS<G>
 where
-    G: CommitmentCurve,
+    G: PartialEq,
 {
     fn eq(&self, other: &Self) -> bool {
         self.g == other.g && self.h == other.h
@@ -59,27 +61,35 @@ where
 
 fn point_of_random_bytes<G: CommitmentCurve>(map: &G::Map, random_bytes: &[u8]) -> G
 where
-    G::BaseField: PrimeField,
+    G::BaseField: Field,
 {
     // packing in bit-representation
     const N: usize = 31;
-    let mut bits = [false; 8 * N];
-    for i in 0..N {
-        for j in 0..8 {
-            bits[8 * i + j] = (random_bytes[i] >> j) & 1 == 1;
+    let extension_degree = G::BaseField::extension_degree() as usize;
+
+    let mut base_fields = Vec::with_capacity(N * extension_degree);
+
+    for base_count in 0..extension_degree {
+        let mut bits = [false; 8 * N];
+        let offset = base_count * N;
+        for i in 0..N {
+            for j in 0..8 {
+                bits[8 * i + j] = (random_bytes[offset + i] >> j) & 1 == 1;
+            }
         }
-    }
 
-    let n = <G::BaseField as PrimeField>::BigInt::from_bits_be(&bits);
-    let t = G::BaseField::from_repr(n).expect("packing code has a bug");
+        let n =
+            <<G::BaseField as Field>::BasePrimeField as PrimeField>::BigInt::from_bits_be(&bits);
+        let t = <<G::BaseField as Field>::BasePrimeField as PrimeField>::from_repr(n)
+            .expect("packing code has a bug");
+        base_fields.push(t)
+    }
+
+    let t = G::BaseField::from_base_prime_field_elems(&base_fields).unwrap();
     let (x, y) = map.to_group(t);
     G::of_coordinates(x, y)
 }
 
-impl<G: CommitmentCurve> SRS<G>
-where
-    G::BaseField: PrimeField,
-{
+impl<G: CommitmentCurve> SRS<G> {
     pub fn max_degree(&self) -> usize {
         self.g.len()
     }
@@ -219,6 +229,39 @@
         self.lagrange_bases.insert(n, chunked_commitments);
     }
 
+    /// This function creates a trusted-setup SRS instance for circuits with number of rows up to `depth`.
+    pub fn create_trusted_setup(x: G::ScalarField, depth: usize) -> Self {
+        let m = G::Map::setup();
+
+        let mut x_pow = G::ScalarField::one();
+        let g: Vec<_> = (0..depth)
+            .map(|_| {
+                let res = G::prime_subgroup_generator().mul(x_pow);
+                x_pow *= x;
+                res.into_affine()
+            })
+            .collect();
+
+        const MISC: usize = 1;
+        let [h]: [G; MISC] = array::from_fn(|i| {
+            let mut h = Blake2b512::new();
+            h.update("srs_misc".as_bytes());
+            h.update((i as u32).to_be_bytes());
+            point_of_random_bytes(&m, &h.finalize())
+        });
+
+        SRS {
+            g,
+            h,
+            lagrange_bases: HashMap::new(),
+        }
+    }
+}
+
+impl<G: CommitmentCurve> SRS<G>
+where
+    G::BaseField: PrimeField,
+{
     /// This function creates SRS instance for circuits with number of rows up to `depth`.
     pub fn create(depth: usize) -> Self {
         let m = G::Map::setup();
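
For orientation, the `g` vector built by `create_trusted_setup` is the usual powers-of-tau layout, `g[i] = x^i * G`, so consecutive elements differ by a factor of the secret scalar. A self-contained illustration of that invariant over BN254 (hypothetical helper, not part of the patch):

```rust
use ark_bn254::{Fr, G1Affine};
use ark_ec::{AffineCurve, ProjectiveCurve};
use ark_ff::{One, UniformRand};
use rand::{rngs::StdRng, SeedableRng};

// Mirrors the g-vector construction in create_trusted_setup: [G, xG, x^2 G, ...].
fn powers_of_tau(x: Fr, depth: usize) -> Vec<G1Affine> {
    let mut x_pow = Fr::one();
    (0..depth)
        .map(|_| {
            let res = G1Affine::prime_subgroup_generator().mul(x_pow);
            x_pow *= x;
            res.into_affine()
        })
        .collect()
}

fn main() {
    let rng = &mut StdRng::from_seed([0u8; 32]);
    let x = Fr::rand(rng);
    let g = powers_of_tau(x, 4);
    // Defining relation of the setup: g[i + 1] = x * g[i].
    assert_eq!(g[2], g[1].mul(x).into_affine());
}
```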
diff --git a/poly-commitment/src/tests/batch_15_wires.rs b/poly-commitment/src/tests/batch_15_wires.rs
index 5d35aed6b3..570e8e8752 100644
--- a/poly-commitment/src/tests/batch_15_wires.rs
+++ b/poly-commitment/src/tests/batch_15_wires.rs
@@ -5,6 +5,7 @@ use crate::{
     commitment::{combined_inner_product, BatchEvaluationProof, CommitmentCurve, Evaluation},
     evaluation_proof::DensePolynomialOrEvaluations,
     srs::SRS,
+    SRS as _,
 };
 use ark_ff::{UniformRand, Zero};
 use ark_poly::{univariate::DensePolynomial, Radix2EvaluationDomain, UVPolynomial};
@@ -29,6 +30,8 @@ where
     let size = 1 << 7;
     let srs = SRS::<VestaG>::create(size);
 
+    let num_chunks = 1;
+
     let group_map = <VestaG as CommitmentCurve>::Map::setup();
 
     let sponge = DefaultFqSponge::<VestaParameters, SpongeParams>::new(
@@ -79,9 +82,9 @@ where
     let comm = (0..a.len())
         .map(|i| {
             (
-                srs.commit(&a[i].clone(), bounds[i], rng),
+                srs.commit(&a[i].clone(), num_chunks, bounds[i], rng),
                 x.iter()
-                    .map(|xx| a[i].to_chunked_polynomial(size).evaluate_chunks(*xx))
+                    .map(|xx| a[i].to_chunked_polynomial(1, size).evaluate_chunks(*xx))
                     .collect::<Vec<_>>(),
                 bounds[i],
             )
diff --git a/poly-commitment/src/tests/commitment.rs b/poly-commitment/src/tests/commitment.rs
index b32101daf2..dedcd0ad6e 100644
--- a/poly-commitment/src/tests/commitment.rs
+++ b/poly-commitment/src/tests/commitment.rs
@@ -5,6 +5,7 @@ use crate::{
     },
     evaluation_proof::{DensePolynomialOrEvaluations, OpeningProof},
     srs::SRS,
+    SRS as _,
 };
 use ark_ff::{UniformRand, Zero};
 use ark_poly::{univariate::DensePolynomial, Radix2EvaluationDomain, UVPolynomial};
@@ -76,7 +77,8 @@ impl AggregatedEvaluationProof {
     pub fn verify_type(
         &self,
         srs: &SRS<Vesta>,
-    ) -> BatchEvaluationProof<Vesta, DefaultFqSponge<VestaParameters, SpongeParams>> {
+    ) -> BatchEvaluationProof<Vesta, DefaultFqSponge<VestaParameters, SpongeParams>, OpeningProof<Vesta>>
+    {
         let mut coms = vec![];
         for eval_com in &self.eval_commitments {
             assert_eq!(self.eval_points.len(), eval_com.chunked_evals.len());
@@ -139,6 +141,8 @@ fn test_randomised<RNG: RngCore + CryptoRng>(mut rng: &mut RNG) {
     // create an SRS optimized for polynomials of degree 2^7 - 1
     let srs = SRS::<Vesta>::create(1 << 7);
 
+    let num_chunks = 1;
+
     // TODO: move to bench
     let mut time_commit = Duration::new(0, 0);
     let mut time_open = Duration::new(0, 0);
@@ -173,13 +177,13 @@ fn test_randomised<RNG: RngCore + CryptoRng>(mut rng: &mut RNG) {
         let BlindedCommitment {
             commitment: chunked_commitment,
             blinders: chunked_blinding,
-        } = srs.commit(&poly, bound, &mut rng);
+        } = srs.commit(&poly, num_chunks, bound, &mut rng);
         time_commit += timer.elapsed();
         let mut chunked_evals = vec![];
         for point in eval_points.clone() {
             chunked_evals.push(
-                poly.to_chunked_polynomial(srs.g.len())
+                poly.to_chunked_polynomial(1, srs.g.len())
                     .evaluate_chunks(point),
             );
         }
     }
diff --git a/poseidon/Cargo.toml b/poseidon/Cargo.toml
index 10b44ec4d4..7a122051e4 100644
--- a/poseidon/Cargo.toml
+++ b/poseidon/Cargo.toml
@@ -37,4 +37,3 @@ ark-serialize = "0.3.0"
 
 [features]
 default = []
 ocaml_types = [ "ocaml", "ocaml-gen", ]
-debug_sponge = []
diff --git a/poseidon/src/dummy_values.rs b/poseidon/src/dummy_values.rs
new file mode 100644
index 0000000000..9bfbc82335
--- /dev/null
+++ b/poseidon/src/dummy_values.rs
@@ -0,0 +1,831 @@
+use crate::poseidon::ArithmeticSpongeParams;
+use ark_ff::Field;
+use std::{fmt::Debug, str::FromStr};
+
+/// Placeholder dummy value for the kimchi configuration, suitable for fields of bitlength 254 and
+/// above.
+/// These parameters are duplicated from the Vesta parameters, generated with
+/// ```text
+/// ./pasta/params.sage --rounds 55 rust 3 kimchi
+/// ```
+pub fn kimchi_dummy<Fp: Field + FromStr>() -> ArithmeticSpongeParams<Fp>
+where
+    <Fp as FromStr>::Err: Debug,
+{
+    ArithmeticSpongeParams {
+        mds: vec![
+            vec![
+                Fp::from_str(
+                    "12035446894107573964500871153637039653510326950134440362813193268448863222019",
+                )
+                .unwrap(),
+                Fp::from_str(
+                    "25461374787957152039031444204194007219326765802730624564074257060397341542093",
+                )
+                .unwrap(),
+                Fp::from_str(
+                    "27667907157110496066452777015908813333407980290333709698851344970789663080149",
+                )
+                .unwrap(),
+            ],
+            vec![
+                Fp::from_str(
+                    "4491931056866994439025447213644536587424785196363427220456343191847333476930",
+                )
+                .unwrap(),
+                Fp::from_str(
+                    "14743631939509747387607291926699970421064627808101543132147270746750887019919",
+                )
+                .unwrap(),
+                Fp::from_str(
+                    "9448400033389617131295304336481030167723486090288313334230651810071857784477",
+                )
+                .unwrap(),
+            ],
+            vec![
+                Fp::from_str(
+                    "10525578725509990281643336361904863911009900817790387635342941550657754064843",
+                )
+                .unwrap(),
+                Fp::from_str(
+                    "27437632000253211280915908546961303399777448677029255413769125486614773776695",
+                )
+                .unwrap(),
+                Fp::from_str(
+                    "27566319851776897085443681456689352477426926500749993803132851225169606086988",
+                )
+                .unwrap(),
+            ],
+        ],
+
+        round_constants: vec![
+            vec![
+                Fp::from_str(
+                    "21155079691556475130150866428468322463125560312786319980770950159250751855431",
+                )
+                .unwrap(),
+                Fp::from_str(
+                    "16883442198399350202652499677723930673110172289234921799701652810789093522349",
+                )
+                .unwrap(),
+                Fp::from_str(
+                    "17030687036425314703519085065002231920937594822150793091243263847382891822670",
+                )
+                .unwrap(),
+            ],
+            vec![
+                Fp::from_str(
+                    "25216718237129482752721276445368692059997901880654047883630276346421457427360",
+                )
+                .unwrap(),
+                Fp::from_str(
+                    "9054264347380455706540423067244764093107767235485930776517975315876127782582",
+                )
+                .unwrap(),
+                Fp::from_str(
+                    "26439087121446593160953570192891907825526260324480347638727375735543609856888",
+                )
+                .unwrap(),
+            ],
+            vec![
+                Fp::from_str(
+                    "15251000790817261169639394496851831733819930596125214313084182526610855787494",
+                )
+                .unwrap(),
+                Fp::from_str(
+                    "10861916012597714684433535077722887124099023163589869801449218212493070551767",
+                )
+                .unwrap(),
+                Fp::from_str(
+                    "18597653523270601187312528478986388028263730767495975370566527202946430104139",
+                )
+                .unwrap(),
+            ],
+            vec![
+                Fp::from_str(
+                    "15831416454198644276563319006805490049460322229057756462580029181847589006611",
+                )
+                .unwrap(),
+                Fp::from_str(
+                    "15171856919255965617705854914448645702014039524159471542852132430360867202292",
+                )
+                .unwrap(),
+                Fp::from_str(
+                    "15488495958879593647482715143904752785889816789652405888927117106448507625751",
+                )
+                .unwrap(),
+            ],
vec![ + Fp::from_str( + "19039802679983063488134304670998725949842655199289961967801223969839823940152", + ) + .unwrap(), + Fp::from_str( + "4720101937153217036737330058775388037616286510783561045464678919473230044408", + ) + .unwrap(), + Fp::from_str( + "10226318327254973427513859412126640040910264416718766418164893837597674300190", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "20878756131129218406920515859235137275859844638301967889441262030146031838819", + ) + .unwrap(), + Fp::from_str( + "7178475685651744631172532830973371642652029385893667810726019303466125436953", + ) + .unwrap(), + Fp::from_str( + "1996970955918516145107673266490486752153434673064635795711751450164177339618", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "15205545916434157464929420145756897321482314798910153575340430817222504672630", + ) + .unwrap(), + Fp::from_str( + "25660296961552699573824264215804279051322332899472350724416657386062327210698", + ) + .unwrap(), + Fp::from_str( + "13842611741937412200312851417353455040950878279339067816479233688850376089318", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "1383799642177300432144836486981606294838630135265094078921115713566691160459", + ) + .unwrap(), + Fp::from_str( + "1135532281155277588005319334542025976079676424839948500020664227027300010929", + ) + .unwrap(), + Fp::from_str( + "4384117336930380014868572224801371377488688194169758696438185377724744869360", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "21725577575710270071808882335900370909424604447083353471892004026180492193649", + ) + .unwrap(), + Fp::from_str( + "676128913284806802699862508051022306366147359505124346651466289788974059668", + ) + .unwrap(), + Fp::from_str( + "25186611339598418732666781049829183886812651492845008333418424746493100589207", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "10402240124664763733060094237696964473609580414190944671778761753887884341073", + ) + .unwrap(), + Fp::from_str( + "11918307118590866200687906627767559273324023585642003803337447146531313172441", + ) + .unwrap(), + Fp::from_str( + "16895677254395661024186292503536662354181715337630376909778003268311296637301", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "23818602699032741669874498456696325705498383130221297580399035778119213224810", + ) + .unwrap(), + Fp::from_str( + "4285193711150023248690088154344086684336247475445482883105661485741762600154", + ) + .unwrap(), + Fp::from_str( + "19133204443389422404056150665863951250222934590192266371578950735825153238612", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "5515589673266504033533906836494002702866463791762187140099560583198974233395", + ) + .unwrap(), + Fp::from_str( + "11830435563729472715615302060564876527985621376031612798386367965451821182352", + ) + .unwrap(), + Fp::from_str( + "7510711479224915247011074129666445216001563200717943545636462819681638560128", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "24694843201907722940091503626731830056550128225297370217610328578733387733444", + ) + .unwrap(), + Fp::from_str( + "27361655066973784653563425664091383058914302579694897188019422193564924110528", + ) + .unwrap(), + Fp::from_str( + "21606788186194534241166833954371013788633495786419718955480491478044413102713", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "19934060063390905409309407607814787335159021816537006003398035237707924006757", + ) + .unwrap(), + Fp::from_str( + "8495813630060004961768092461554180468161254914257386012937942498774724649553", + ) + .unwrap(), + Fp::from_str( + 
"27524960680529762202005330464726908693944660961000958842417927307941561848461", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "15178481650950399259757805400615635703086255035073919114667254549690862896985", + ) + .unwrap(), + Fp::from_str( + "16164780354695672259791105197274509251141405713012804937107314962551600380870", + ) + .unwrap(), + Fp::from_str( + "10529167793600778056702353412758954281652843049850979705476598375597148191979", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "721141070179074082553302896292167103755384741083338957818644728290501449040", + ) + .unwrap(), + Fp::from_str( + "22044408985956234023934090378372374883099115753118261312473550998188148912041", + ) + .unwrap(), + Fp::from_str( + "27068254103241989852888872162525066148367014691482601147536314217249046186315", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "3880429241956357176819112098792744584376727450211873998699580893624868748961", + ) + .unwrap(), + Fp::from_str( + "17387097125522937623262508065966749501583017524609697127088211568136333655623", + ) + .unwrap(), + Fp::from_str( + "6256814421247770895467770393029354017922744712896100913895513234184920631289", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "2942627347777337187690939671601251987500285937340386328746818861972711408579", + ) + .unwrap(), + Fp::from_str( + "24031654937764287280548628128490074801809101323243546313826173430897408945397", + ) + .unwrap(), + Fp::from_str( + "14401457902976567713827506689641442844921449636054278900045849050301331732143", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "20170632877385406450742199836933900257692624353889848352407590794211839130727", + ) + .unwrap(), + Fp::from_str( + "24056496193857444725324410428861722338174099794084586764867109123681727290181", + ) + .unwrap(), + Fp::from_str( + "11257913009612703357266904349759250619633397075667824800196659858304604714965", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "22228158921984425749199071461510152694025757871561406897041788037116931009246", + ) + .unwrap(), + Fp::from_str( + "9152163378317846541430311327336774331416267016980485920222768197583559318682", + ) + .unwrap(), + Fp::from_str( + "13906695403538884432896105059360907560653506400343268230130536740148070289175", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "7220714562509721437034241786731185291972496952091254931195414855962344025067", + ) + .unwrap(), + Fp::from_str( + "27608867305903811397208862801981345878179337369367554478205559689592889691927", + ) + .unwrap(), + Fp::from_str( + "13288465747219756218882697408422850918209170830515545272152965967042670763153", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "8251343892709140154567051772980662609566359215743613773155065627504813327653", + ) + .unwrap(), + Fp::from_str( + "22035238365102171608166944627493632660244312563934708756134297161332908879090", + ) + .unwrap(), + Fp::from_str( + "13560937766273321037807329177749403409731524715067067740487246745322577571823", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "21652518608959234550262559135285358020552897349934571164032339186996805408040", + ) + .unwrap(), + Fp::from_str( + "22479086963324173427634460342145551255011746993910136574926173581069603086891", + ) + .unwrap(), + Fp::from_str( + "13676501958531751140966255121288182631772843001727158043704693838707387130095", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "5680310394102577950568930199056707827608275306479994663197187031893244826674", + ) + .unwrap(), + Fp::from_str( + 
"25125360450906166639190392763071557410047335755341060350879819485506243289998", + ) + .unwrap(), + Fp::from_str( + "22659254028501616785029594492374243581602744364859762239504348429834224676676", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "23101411405087512171421838856759448177512679869882987631073569441496722536782", + ) + .unwrap(), + Fp::from_str( + "24149774013240355952057123660656464942409328637280437515964899830988178868108", + ) + .unwrap(), + Fp::from_str( + "5782097512368226173095183217893826020351125522160843964147125728530147423065", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "13540762114500083869920564649399977644344247485313990448129838910231204868111", + ) + .unwrap(), + Fp::from_str( + "20421637734328811337527547703833013277831804985438407401987624070721139913982", + ) + .unwrap(), + Fp::from_str( + "7742664118615900772129122541139124149525273579639574972380600206383923500701", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "1109643801053963021778418773196543643970146666329661268825691230294798976318", + ) + .unwrap(), + Fp::from_str( + "16580663920817053843121063692728699890952505074386761779275436996241901223840", + ) + .unwrap(), + Fp::from_str( + "14638514680222429058240285918830106208025229459346033470787111294847121792366", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "17080385857812672649489217965285727739557573467014392822992021264701563205891", + ) + .unwrap(), + Fp::from_str( + "26176268111736737558502775993925696791974738793095023824029827577569530708665", + ) + .unwrap(), + Fp::from_str( + "4382756253392449071896813428140986330161215829425086284611219278674857536001", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "13934033814940585315406666445960471293638427404971553891617533231178815348902", + ) + .unwrap(), + Fp::from_str( + "27054912732979753314774418228399230433963143177662848084045249524271046173121", + ) + .unwrap(), + Fp::from_str( + "28916070403698593376490976676534962592542013020010643734621202484860041243391", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "24820015636966360150164458094894587765384135259446295278101998130934963922381", + ) + .unwrap(), + Fp::from_str( + "7969535238488580655870884015145760954416088335296905520306227531221721881868", + ) + .unwrap(), + Fp::from_str( + "7690547696740080985104189563436871930607055124031711216224219523236060212249", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "9712576468091272384496248353414290908377825697488757134833205246106605867289", + ) + .unwrap(), + Fp::from_str( + "12148698031438398980683630141370402088785182722473169207262735228500190477924", + ) + .unwrap(), + Fp::from_str( + "14359657643133476969781351728574842164124292705609900285041476162075031948227", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "23563839965372067275137992801035780013422228997724286060975035719045352435470", + ) + .unwrap(), + Fp::from_str( + "4184634822776323233231956802962638484057536837393405750680645555481330909086", + ) + .unwrap(), + Fp::from_str( + "16249511905185772125762038789038193114431085603985079639889795722501216492487", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "11001863048692031559800673473526311616702863826063550559568315794438941516621", + ) + .unwrap(), + Fp::from_str( + "4702354107983530219070178410740869035350641284373933887080161024348425080464", + ) + .unwrap(), + Fp::from_str( + "23751680507533064238793742311430343910720206725883441625894258483004979501613", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + 
"28670526516158451470169873496541739545860177757793329093045522432279094518766", + ) + .unwrap(), + Fp::from_str( + "3568312993091537758218792253361873752799472566055209125947589819564395417072", + ) + .unwrap(), + Fp::from_str( + "1819755756343439646550062754332039103654718693246396323207323333948654200950", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "5372129954699791301953948907349887257752247843844511069896766784624930478273", + ) + .unwrap(), + Fp::from_str( + "17512156688034945920605615850550150476471921176481039715733979181538491476080", + ) + .unwrap(), + Fp::from_str( + "25777105342317622165159064911913148785971147228777677435200128966844208883059", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "25350392006158741749134238306326265756085455157012701586003300872637887157982", + ) + .unwrap(), + Fp::from_str( + "20096724945283767296886159120145376967480397366990493578897615204296873954844", + ) + .unwrap(), + Fp::from_str( + "8063283381910110762785892100479219642751540456251198202214433355775540036851", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "4393613870462297385565277757207010824900723217720226130342463666351557475823", + ) + .unwrap(), + Fp::from_str( + "9874972555132910032057499689351411450892722671352476280351715757363137891038", + ) + .unwrap(), + Fp::from_str( + "23590926474329902351439438151596866311245682682435235170001347511997242904868", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "17723373371137275859467518615551278584842947963894791032296774955869958211070", + ) + .unwrap(), + Fp::from_str( + "2350345015303336966039836492267992193191479606566494799781846958620636621159", + ) + .unwrap(), + Fp::from_str( + "27755207882790211140683010581856487965587066971982625511152297537534623405016", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "6584607987789185408123601849106260907671314994378225066806060862710814193906", + ) + .unwrap(), + Fp::from_str( + "609759108847171587253578490536519506369136135254150754300671591987320319770", + ) + .unwrap(), + Fp::from_str( + "28435187585965602110074342250910608316032945187476441868666714022529803033083", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "16016664911651770663938916450245705908287192964254704641717751103464322455303", + ) + .unwrap(), + Fp::from_str( + "17551273293154696089066968171579395800922204266630874071186322718903959339163", + ) + .unwrap(), + Fp::from_str( + "20414195497994754529479032467015716938594722029047207834858832838081413050198", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "19773307918850685463180290966774465805537520595602496529624568184993487593855", + ) + .unwrap(), + Fp::from_str( + "24598603838812162820757838364185126333280131847747737533989799467867231166980", + ) + .unwrap(), + Fp::from_str( + "11040972566103463398651864390163813377135738019556270484707889323659789290225", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "5189242080957784038860188184443287562488963023922086723850863987437818393811", + ) + .unwrap(), + Fp::from_str( + "1435203288979376557721239239445613396009633263160237764653161500252258220144", + ) + .unwrap(), + Fp::from_str( + "13066591163578079667911016543985168493088721636164837520689376346534152547210", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "17345901407013599418148210465150865782628422047458024807490502489711252831342", + ) + .unwrap(), + Fp::from_str( + "22139633362249671900128029132387275539363684188353969065288495002671733200348", + ) + .unwrap(), + Fp::from_str( + "1061056418502836172283188490483332922126033656372467737207927075184389487061", + ) + 
.unwrap(), + ], + vec![ + Fp::from_str( + "10241738906190857416046229928455551829189196941239601756375665129874835232299", + ) + .unwrap(), + Fp::from_str( + "27808033332417845112292408673209999320983657696373938259351951416571545364415", + ) + .unwrap(), + Fp::from_str( + "18820154989873674261497645724903918046694142479240549687085662625471577737140", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "7983688435214640842673294735439196010654951226956101271763849527529940619307", + ) + .unwrap(), + Fp::from_str( + "17067928657801807648925755556866676899145460770352731818062909643149568271566", + ) + .unwrap(), + Fp::from_str( + "24472070825156236829515738091791182856425635433388202153358580534810244942762", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "25752201169361795911258625731016717414310986450004737514595241038036936283227", + ) + .unwrap(), + Fp::from_str( + "26041505376284666160132119888949817249574689146924196064963008712979256107535", + ) + .unwrap(), + Fp::from_str( + "23977050489096115210391718599021827780049209314283111721864956071820102846008", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "26678257097278788410676026718736087312816016749016738933942134600725962413805", + ) + .unwrap(), + Fp::from_str( + "10480026985951498884090911619636977502506079971893083605102044931823547311729", + ) + .unwrap(), + Fp::from_str( + "21126631300593007055117122830961273871167754554670317425822083333557535463396", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "1564862894215434177641156287699106659379648851457681469848362532131406827573", + ) + .unwrap(), + Fp::from_str( + "13247162472821152334486419054854847522301612781818744556576865965657773174584", + ) + .unwrap(), + Fp::from_str( + "8673615954922496961704442777870253767001276027366984739283715623634850885984", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "2794525076937490807476666942602262298677291735723129868457629508555429470085", + ) + .unwrap(), + Fp::from_str( + "4656175953888995612264371467596648522808911819700660048695373348629527757049", + ) + .unwrap(), + Fp::from_str( + "23221574237857660318443567292601561932489621919104226163978909845174616477329", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "1878392460078272317716114458784636517603142716091316893054365153068227117145", + ) + .unwrap(), + Fp::from_str( + "2370412714505757731457251173604396662292063533194555369091306667486647634097", + ) + .unwrap(), + Fp::from_str( + "17409784861870189930766639925394191888667317762328427589153989811980152373276", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "25869136641898166514111941708608048269584233242773814014385564101168774293194", + ) + .unwrap(), + Fp::from_str( + "11361209360311194794795494027949518465383235799633128250259863567683341091323", + ) + .unwrap(), + Fp::from_str( + "14913258820718821235077379851098720071902170702113538811112331615559409988569", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "12957012022018304419868287033513141736995211906682903915897515954290678373899", + ) + .unwrap(), + Fp::from_str( + "17128889547450684566010972445328859295804027707361763477802050112063630550300", + ) + .unwrap(), + Fp::from_str( + "23329219085372232771288306767242735245018143857623151155581182779769305489903", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "1607741027962933685476527275858938699728586794398382348454736018784568853937", + ) + .unwrap(), + Fp::from_str( + "2611953825405141009309433982109911976923326848135736099261873796908057448476", + ) + .unwrap(), + Fp::from_str( + 
"7372230383134982628913227482618052530364724821976589156840317933676130378411", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "20203606758501212620842735123770014952499754751430660463060696990317556818571", + ) + .unwrap(), + Fp::from_str( + "4678361398979174017885631008335559529633853759463947250620930343087749944307", + ) + .unwrap(), + Fp::from_str( + "27176462634198471376002287271754121925750749676999036165457559387195124025594", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "6361981813552614697928697527332318530502852015189048838072565811230204474643", + ) + .unwrap(), + Fp::from_str( + "13815234633287489023151647353581705241145927054858922281829444557905946323248", + ) + .unwrap(), + Fp::from_str( + "10888828634279127981352133512429657747610298502219125571406085952954136470354", + ) + .unwrap(), + ], + ], + } +} diff --git a/poseidon/src/lib.rs b/poseidon/src/lib.rs index 20d7ca4df8..943d54cb87 100644 --- a/poseidon/src/lib.rs +++ b/poseidon/src/lib.rs @@ -1,4 +1,5 @@ pub mod constants; +pub mod dummy_values; pub mod pasta; pub mod permutation; pub mod poseidon; diff --git a/poseidon/src/poseidon.rs b/poseidon/src/poseidon.rs index 0ee363edc9..ff06022910 100644 --- a/poseidon/src/poseidon.rs +++ b/poseidon/src/poseidon.rs @@ -1,8 +1,5 @@ //! This module implements Poseidon Hash Function primitive -#[cfg(feature = "debug_sponge")] -use std::sync::atomic::{AtomicU64, Ordering::SeqCst}; - use crate::constants::SpongeConstants; use crate::permutation::{full_round, poseidon_block_cipher}; use ark_ff::Field; @@ -52,8 +49,6 @@ pub struct ArithmeticSponge { pub state: Vec, params: &'static ArithmeticSpongeParams, pub constants: std::marker::PhantomData, - #[cfg(feature = "debug_sponge")] - pub id: u64, } impl ArithmeticSponge { @@ -77,17 +72,12 @@ impl Sponge for ArithmeticSponge { state.push(F::zero()); } - #[cfg(feature = "debug_sponge")] - static COUNTER: AtomicU64 = AtomicU64::new(0); - ArithmeticSponge { state, rate, sponge_state: SpongeState::Absorbed(0), params, constants: std::marker::PhantomData, - #[cfg(feature = "debug_sponge")] - id: COUNTER.fetch_add(1, SeqCst), } } diff --git a/poseidon/src/sponge.rs b/poseidon/src/sponge.rs index 385afbf539..ff7f00a412 100644 --- a/poseidon/src/sponge.rs +++ b/poseidon/src/sponge.rs @@ -3,9 +3,6 @@ use crate::poseidon::{ArithmeticSponge, ArithmeticSpongeParams, Sponge}; use ark_ec::{short_weierstrass_jacobian::GroupAffine, SWModelParameters}; use ark_ff::{BigInteger, Field, FpParameters, One, PrimeField, Zero}; -#[cfg(feature = "debug_sponge")] -use o1_utils::FieldHelpers; - pub use crate::FqSponge; pub const CHALLENGE_LENGTH_IN_LIMBS: usize = 2; @@ -132,49 +129,6 @@ where } } -// Debugging macros -- these only insert code when non-release build and -// "debug_sponge" feature is enabled. -macro_rules! debug_sponge { - ($name:expr, $sponge:expr) => { - #[cfg(feature = "debug_sponge")] - { - // No input - debug_sponge_print_state!($name, $sponge); - } - }; - ($name:expr, $input:expr, $sponge:expr) => { - #[cfg(feature = "debug_sponge")] - { - // Field input - debug_sponge_print_state!($name, $sponge); - - println!( - "debug_sponge: id{} {} input {}", - $sponge.id, - $name, - $input.to_hex() - ); - } - }; -} -#[cfg(feature = "debug_sponge")] -macro_rules! 
debug_sponge_print_state { - ($name:expr, $sponge:expr) => { - println!( - "debug_sponge: id{} {} state {:?} {}", - $sponge.id, - $name, - $sponge.sponge_state, - $sponge - .state - .iter() - .map(|f| { f.to_hex() }) - .collect::>() - .join(" "), - ); - }; -} - impl FqSponge, P::ScalarField> for DefaultFqSponge where @@ -183,7 +137,6 @@ where { fn new(params: &'static ArithmeticSpongeParams) -> DefaultFqSponge { let sponge = ArithmeticSponge::new(params); - debug_sponge!("new", sponge); DefaultFqSponge { sponge, last_squeezed: vec![], @@ -196,14 +149,10 @@ where if g.infinity { // absorb a fake point (0, 0) let zero = P::BaseField::zero(); - debug_sponge!("absorb", zero, self.sponge); self.sponge.absorb(&[zero]); - debug_sponge!("absorb", zero, self.sponge); self.sponge.absorb(&[zero]); } else { - debug_sponge!("absorb", g.x, self.sponge); self.sponge.absorb(&[g.x]); - debug_sponge!("absorb", g.y, self.sponge); self.sponge.absorb(&[g.y]); } } @@ -213,7 +162,6 @@ where self.last_squeezed = vec![]; for fe in x { - debug_sponge!("absorb", fe, self.sponge); self.sponge.absorb(&[*fe]) } } @@ -232,7 +180,6 @@ where ::BigInt::from_bits_le(&bits), ) .expect("padding code has a bug"); - debug_sponge!("absorb", fe, self.sponge); self.sponge.absorb(&[fe]); } else { let low_bit = if bits[0] { @@ -246,16 +193,13 @@ where ) .expect("padding code has a bug"); - debug_sponge!("absorb", high_bits, self.sponge); self.sponge.absorb(&[high_bits]); - debug_sponge!("absorb", low_bit, self.sponge); self.sponge.absorb(&[low_bit]); } }); } fn digest(mut self) -> P::ScalarField { - debug_sponge!("squeeze", self.sponge); let x: ::BigInt = self.squeeze_field().into_repr(); // Returns zero for values that are too large. // This means that there is a bias for the value zero (in one of the curve). @@ -267,17 +211,14 @@ where } fn digest_fq(mut self) -> P::BaseField { - debug_sponge!("squeeze", self.sponge); self.squeeze_field() } fn challenge(&mut self) -> P::ScalarField { - debug_sponge!("squeeze", self.sponge); self.squeeze(CHALLENGE_LENGTH_IN_LIMBS) } fn challenge_fq(&mut self) -> P::BaseField { - debug_sponge!("squeeze", self.sponge); self.squeeze_field() } } diff --git a/rust-toolchain b/rust-toolchain index 65ee095984..cc31fcd4f5 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -1.67.0 +1.72 diff --git a/tools/kimchi-asm/CHANGELOG.md b/tools/kimchi-asm/CHANGELOG.md deleted file mode 100644 index 56773f1e6a..0000000000 --- a/tools/kimchi-asm/CHANGELOG.md +++ /dev/null @@ -1,13 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
diff --git a/rust-toolchain b/rust-toolchain
index 65ee095984..cc31fcd4f5 100644
--- a/rust-toolchain
+++ b/rust-toolchain
@@ -1 +1 @@
-1.67.0
+1.72
diff --git a/tools/kimchi-asm/CHANGELOG.md b/tools/kimchi-asm/CHANGELOG.md
deleted file mode 100644
index 56773f1e6a..0000000000
--- a/tools/kimchi-asm/CHANGELOG.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Changelog
-
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
-and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
-## [Unreleased]
-
-
-## 0.1.0 (2023-03-09)
-
-- Initial release
diff --git a/tools/kimchi-asm/Cargo.toml b/tools/kimchi-asm/Cargo.toml
deleted file mode 100644
index 4bd69deaef..0000000000
--- a/tools/kimchi-asm/Cargo.toml
+++ /dev/null
@@ -1,24 +0,0 @@
-[package]
-name = "kimchi-asm"
-version = "0.1.0"
-description = "A tool to read circuits"
-repository = "https://github.com/o1-labs/proof-systems"
-homepage = "https://o1-labs.github.io/proof-systems/"
-documentation = "https://o1-labs.github.io/proof-systems/rustdoc/"
-readme = "README.md"
-edition = "2021"
-license = "Apache-2.0"
-
-[dependencies]
-ark-ec = "0.3.0"
-ark-ff = "0.3.0"
-serde = { version = "1.0.136", features = ["derive"] }
-serde_json = "1.0.79"
-serde_with = "1.10.0"
-tinytemplate = "1.1"
-
-mina-curves = { path = "../../curves", version = "0.1.0" }
-kimchi = { path = "../../kimchi", version = "0.1.0" }
-o1-utils = { path = "../../utils", version = "0.1.0" }
-mina-poseidon = { path = "../../poseidon", version = "0.1.0" }
-poly-commitment = { path = "../../poly-commitment", version = "0.1.0" }
diff --git a/tools/kimchi-asm/README.md b/tools/kimchi-asm/README.md
deleted file mode 100644
index b55767c23f..0000000000
--- a/tools/kimchi-asm/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# Kimchi ASM
-
-Allows you to deserialize a circuit in JSON format and print it using some pseudo-assembly language.
-
-Simply pipe your JSON to this binary. For example:
-
-```console
-$ cargo run --bin kimchi-asm < examples/circuits/poseidon.json
-```
-
-You will get an output like:
-
-```asm
-row0.pub.Generic<1,0,0,0,0>
-.l1 -> row4.l1
-
-row1.pub.Generic<1,0,0,0,0>
-.l1 -> row2.l1
-
-row2.Generic<-1,0,0,1,0><-1,0,0,1,0>
-.l1 -> row4.r1, .r1 -> row1.l1
-.l2 -> row0.l1, .r2 -> row2.l2
-
-row3.Generic<-1,0,0,1,0><-1,0,0,1,0>
-.l1 -> row4.r2, .r1 -> row3.l1
-.l2 -> row4.l2, .r2 -> row3.l2
-
-row4.Generic<0,0,1,-1,0><0,0,1,-1,0>
-.l1 -> row2.r2, .r1 -> row2.r1, .o1 -> row5.l1
-.l2 -> row3.r2, .r2 -> row3.r1, .o2 -> row4.o1
-
-row5.Generic<1,0,0,0,-1>
-.l1 -> row4.o2
-```
diff --git a/tools/kimchi-asm/src/main.rs b/tools/kimchi-asm/src/main.rs
deleted file mode 100644
index 257146dc83..0000000000
--- a/tools/kimchi-asm/src/main.rs
+++ /dev/null
@@ -1,36 +0,0 @@
-use ark_ff::PrimeField;
-use kimchi::circuits::gate::{Circuit, CircuitGate};
-use mina_curves::pasta::Fp;
-use serde::de::DeserializeOwned;
-
-#[derive(serde::Deserialize)]
-pub struct DeserializableCircuit<F>
-where
-    F: PrimeField,
-{
-    pub public_input_size: usize,
-    #[serde(bound = "CircuitGate<F>: DeserializeOwned")]
-    pub gates: Vec<CircuitGate<F>>,
-}
-
-impl<'a, F> From<&'a DeserializableCircuit<F>> for Circuit<'a, F>
-where
-    F: PrimeField,
-{
-    fn from(circuit: &'a DeserializableCircuit<F>) -> Self {
-        Circuit::new(circuit.public_input_size, &circuit.gates)
-    }
-}
-
-fn main() {
-    // get what was piped to this binary
-    let stdin = std::io::stdin();
-
-    // deserialize it to JSON
-    let circuit: DeserializableCircuit<Fp> =
-        serde_json::from_reader(stdin).expect("couldn't deserialize the circuit");
-
-    let circuit: Circuit<_> = (&circuit).into();
-
-    println!("{}", circuit.generate_asm());
-}
diff --git a/tools/kimchi-visu/src/lib.rs b/tools/kimchi-visu/src/lib.rs
index dd917dea43..390a6924b5 100644
--- a/tools/kimchi-visu/src/lib.rs
+++ b/tools/kimchi-visu/src/lib.rs
@@ -13,7 +13,7 @@ use kimchi::{
     curve::KimchiCurve,
     prover_index::ProverIndex,
 };
-use poly_commitment::commitment::CommitmentCurve;
+use poly_commitment::{commitment::CommitmentCurve, evaluation_proof::OpeningProof};
 use serde::Serialize;
 use std::{
     collections::HashMap,
@@ -75,7 +75,12 @@ where
 /// # Panics
 ///
 /// Will panic if `TinyTemplate::render()` returns `Error` or `std::fs::File::create()` returns `Error`.
-pub fn visu<G: KimchiCurve>(index: &ProverIndex<G>, witness: Option<Witness<G::ScalarField>>) {
+pub fn visu<G: KimchiCurve>(
+    index: &ProverIndex<G, OpeningProof<G>>,
+    witness: Option<Witness<G::ScalarField>>,
+) where
+    G::BaseField: PrimeField,
+{
     // serialize index
     let index = serde_json::to_string(index).expect("couldn't serialize index");
     let mut data = format!("const index = {index};");
diff --git a/utils/src/chunked_polynomial.rs b/utils/src/chunked_polynomial.rs
index c8d09c42b3..6f79de09ea 100644
--- a/utils/src/chunked_polynomial.rs
+++ b/utils/src/chunked_polynomial.rs
@@ -64,6 +64,7 @@ mod tests {
         let one = Fp::one();
         let zeta = one + one;
         let zeta_n = zeta.square();
+        let num_chunks = 4;
         let res = (one + zeta)
             * (one + zeta_n + zeta_n * zeta.square() + zeta_n * zeta.square() * zeta.square());
@@ -71,7 +72,10 @@
         let coeffs = [one, one, one, one, one, one, one, one];
         let f = DensePolynomial::from_coefficients_slice(&coeffs);
 
-        let eval = f.to_chunked_polynomial(2).linearize(zeta_n).evaluate(&zeta);
+        let eval = f
+            .to_chunked_polynomial(num_chunks, 2)
+            .linearize(zeta_n)
+            .evaluate(&zeta);
 
         assert!(eval == res);
     }
diff --git a/utils/src/dense_polynomial.rs b/utils/src/dense_polynomial.rs
index fdb2b9e4a3..72560f1057 100644
--- a/utils/src/dense_polynomial.rs
+++ b/utils/src/dense_polynomial.rs
@@ -22,8 +22,8 @@ pub trait ExtendedDensePolynomial<F: Field> {
     fn eval_polynomial(coeffs: &[F], x: F) -> F;
 
     /// Convert a polynomial into chunks.
-    /// Implementors must ensure that the result contains at least 1 chunk.
-    fn to_chunked_polynomial(&self, size: usize) -> ChunkedPolynomial<F>;
+    /// Implementors must ensure that the result contains exactly `num_chunks` chunks.
+    fn to_chunked_polynomial(&self, num_chunks: usize, size: usize) -> ChunkedPolynomial<F>;
 }
 
 impl<F: Field> ExtendedDensePolynomial<F> for DensePolynomial<F> {
@@ -46,20 +46,17 @@ impl<F: Field> ExtendedDensePolynomial<F> for DensePolynomial<F> {
         DensePolynomial::from_coefficients_slice(coeffs).evaluate(&x)
     }
 
-    fn to_chunked_polynomial(&self, chunk_size: usize) -> ChunkedPolynomial<F> {
-        // Ensure that there is always at least 1 polynomial in the resulting chunked polynomial.
-        if self.coeffs.is_empty() {
-            return ChunkedPolynomial {
-                polys: vec![DensePolynomial::from_coefficients_vec(vec![])],
-                size: chunk_size,
-            };
-        }
-
-        let mut chunk_polys: Vec<DensePolynomial<F>> = vec![];
+    fn to_chunked_polynomial(&self, num_chunks: usize, chunk_size: usize) -> ChunkedPolynomial<F> {
+        let mut chunk_polys: Vec<DensePolynomial<F>> = Vec::with_capacity(num_chunks);
         for chunk in self.coeffs.chunks(chunk_size) {
             chunk_polys.push(DensePolynomial::from_coefficients_slice(chunk));
         }
 
+        // Pad unused chunks with zeros.
+        for _ in chunk_polys.len()..num_chunks {
+            chunk_polys.push(DensePolynomial::from_coefficients_vec(vec![]));
+        }
+
         ChunkedPolynomial {
             polys: chunk_polys,
             size: chunk_size,
@@ -83,12 +80,14 @@ mod tests {
         let one = Fp::one();
         let two = one + one;
         let three = two + one;
+        let num_chunks = 4;
 
         // 1 + x + x^2 + x^3 + x^4 + x^5 + x^6 + x^7
         let coeffs = [one, one, one, one, one, one, one, one];
         let f = DensePolynomial::from_coefficients_slice(&coeffs);
 
-        let evals = f.to_chunked_polynomial(2).evaluate_chunks(two);
-        for eval in evals.into_iter().take(4) {
+        let evals = f.to_chunked_polynomial(num_chunks, 2).evaluate_chunks(two);
+        assert_eq!(evals.len(), num_chunks);
+        for eval in evals.into_iter().take(num_chunks) {
             assert!(eval == three);
         }
     }
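
The new chunking contract is easiest to see end to end: eight coefficients at chunk size 2 fill four chunks, and any extra requested chunks come back as zero polynomials. A sketch (the `o1_utils` re-export path is assumed):

```rust
use ark_ff::Zero;
use ark_poly::{univariate::DensePolynomial, UVPolynomial};
use mina_curves::pasta::Fp;
use o1_utils::ExtendedDensePolynomial;

fn main() {
    // 1 + 2x + ... + 8x^7: eight coefficients, chunk size 2.
    let coeffs: Vec<Fp> = (1..=8u64).map(Fp::from).collect();
    let f = DensePolynomial::from_coefficients_vec(coeffs);

    // Requesting 6 chunks zero-pads the two chunks past the data.
    let chunked = f.to_chunked_polynomial(6, 2);
    assert_eq!(chunked.polys.len(), 6);
    assert!(chunked.polys[5].is_zero());
}
```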