diff --git a/.github/workflows/benches.yml b/.github/workflows/benches.yml index f957eda17c..e8ac86e6b9 100644 --- a/.github/workflows/benches.yml +++ b/.github/workflows/benches.yml @@ -7,7 +7,7 @@ on: env: OCAML_VERSION: "4.14.0" - RUST_TOOLCHAIN_VERSION: "1.67" + RUST_TOOLCHAIN_VERSION: "1.71" jobs: diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 2fd96e8a07..3490171728 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -2,8 +2,6 @@ name: CI on: push: - branches: - - master pull_request: env: diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000000..5b1dec4f59 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "optimism/ethereum-optimism"] + path = optimism/ethereum-optimism + url = https://github.com/ethereum-optimism/optimism.git diff --git a/Cargo.lock b/Cargo.lock index 5602de9ffa..6c7365613e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,6 +17,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +[[package]] +name = "adler32" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" + [[package]] name = "ahash" version = "0.7.6" @@ -28,6 +34,18 @@ dependencies = [ "version_check", ] +[[package]] +name = "ahash" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd7d5a2cecb58716e47d67d5703a249964b14c7be1ec3cad3affc295b2d1c35d" +dependencies = [ + "cfg-if 1.0.0", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" version = "1.0.2" @@ -46,6 +64,54 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "anstream" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" 
+dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" + +[[package]] +name = "anstyle-parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" +dependencies = [ + "anstyle", + "windows-sys", +] + [[package]] name = "ark-algebra-test-templates" version = "0.3.0" @@ -58,6 +124,17 @@ dependencies = [ "ark-std", ] +[[package]] +name = "ark-bn254" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea691771ebbb28aea556c044e2e5c5227398d840cee0c34d4d20fa8eb2689e8c" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-std", +] + [[package]] name = "ark-ec" version = "0.3.0" @@ -145,7 +222,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8dd4e5f0bf8285d5ed538d27fab7411f3e297908fd93c62195de8bee3f199e82" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", ] @@ -179,7 +256,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87bf87e6e8b47264efa9bde63d6225c6276a52e05e91bf37eaa8afd0032d6b71" dependencies = [ "askama_shared", - "proc-macro2 1.0.64", + "proc-macro2 
1.0.69", "syn 1.0.109", ] @@ -202,7 +279,7 @@ dependencies = [ "nom", "num-traits", "percent-encoding", - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "serde", "syn 1.0.109", @@ -243,9 +320,9 @@ dependencies = [ [[package]] name = "base64" -version = "0.21.2" +version = "0.21.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" +checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" [[package]] name = "bcs" @@ -406,7 +483,7 @@ dependencies = [ "atty", "bitflags 1.3.2", "clap_derive", - "clap_lex", + "clap_lex 0.2.4", "indexmap", "once_cell", "strsim 0.10.0", @@ -414,6 +491,27 @@ dependencies = [ "textwrap 0.16.0", ] +[[package]] +name = "clap" +version = "4.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d04704f56c2cde07f43e8e2c154b43f216dc5c92fc98ada720177362f953b956" +dependencies = [ + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e231faeaca65ebd1ea3c737966bf858971cd38c3849107aa3ea7de90a804e45" +dependencies = [ + "anstream", + "anstyle", + "clap_lex 0.5.1", + "strsim 0.10.0", +] + [[package]] name = "clap_derive" version = "3.2.25" @@ -422,7 +520,7 @@ checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" dependencies = [ "heck", "proc-macro-error", - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", ] @@ -436,6 +534,18 @@ dependencies = [ "os_str_bytes", ] +[[package]] +name = "clap_lex" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" + +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + [[package]] name = "colored" version = "2.0.4" @@ -498,6 +608,15 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "core2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505" +dependencies = [ + "memchr", +] + [[package]] name = "cpufeatures" version = "0.2.9" @@ -656,7 +775,7 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "strsim 0.10.0", "syn 1.0.109", @@ -673,13 +792,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "dary_heap" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7762d17f1241643615821a8455a0b2c3e803784b058693d990b11f2dce25a0ca" + [[package]] name = "derivative" version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", ] @@ -716,6 +841,12 @@ version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" +[[package]] +name = "elf" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6e7d85896690fe195447717af8eceae0593ac2196fd42fe88c184e904406ce" + [[package]] name = "entities" version = "1.0.1" @@ -792,9 +923,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.26" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" 
dependencies = [ "crc32fast", "miniz_oxide", @@ -896,7 +1027,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ - "ahash", + "ahash 0.7.6", ] [[package]] @@ -905,7 +1036,16 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.6", +] + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash 0.8.5", ] [[package]] @@ -1089,6 +1229,7 @@ dependencies = [ name = "kimchi" version = "0.1.0" dependencies = [ + "ark-bn254", "ark-ec", "ark-ff", "ark-poly", @@ -1132,7 +1273,7 @@ dependencies = [ ] [[package]] -name = "kimchi-asm" +name = "kimchi-visu" version = "0.1.0" dependencies = [ "ark-ec", @@ -1149,20 +1290,27 @@ dependencies = [ ] [[package]] -name = "kimchi-visu" +name = "kimchi_optimism" version = "0.1.0" dependencies = [ - "ark-ec", + "ark-bn254", "ark-ff", + "ark-poly", + "base64", + "clap 4.4.6", + "elf", + "groupmap", + "hex", "kimchi", + "libflate", "mina-curves", "mina-poseidon", - "o1-utils", "poly-commitment", + "regex", + "rmp-serde", "serde", "serde_json", "serde_with", - "tinytemplate", ] [[package]] @@ -1183,6 +1331,30 @@ version = "0.2.147" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +[[package]] +name = "libflate" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7d5654ae1795afc7ff76f4365c2c8791b0feb18e8996a96adad8ffd7c3b2bf" +dependencies = [ + "adler32", + "core2", + "crc32fast", + "dary_heap", + "libflate_lz77", +] + +[[package]] +name = "libflate_lz77" 
+version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be5f52fb8c451576ec6b79d3f4deb327398bc05bbdbd99021a6e77a4c855d524" +dependencies = [ + "core2", + "hashbrown 0.13.2", + "rle-decode-fast", +] + [[package]] name = "libm" version = "0.2.7" @@ -1224,9 +1396,9 @@ checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "memoffset" @@ -1263,7 +1435,7 @@ version = "4.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b5bc45b761bcf1b5e6e6c4128cd93b84c218721a8d9b894aa0aff4ed180174c" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", ] @@ -1470,7 +1642,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", ] @@ -1565,7 +1737,7 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b40aa99a001268b85eb18414ecd190dc21fceaeaf81214ca28233b6feb25a998" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", "synstructure", @@ -1590,7 +1762,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1894efdef5c9d83d17932c5f5db16d16eb5c8ae1a625ce44d9d1715e85d9d8dc" dependencies = [ "convert_case", - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", ] @@ -1701,9 +1873,9 @@ checksum = "b3e8cba4ec22bada7fc55ffe51e2deb6a0e0db2d0b7ab0b103acc80d2510c190" dependencies = [ "pest", "pest_meta", - "proc-macro2 
1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", - "syn 2.0.25", + "syn 2.0.38", ] [[package]] @@ -1769,6 +1941,7 @@ dependencies = [ name = "poly-commitment" version = "0.1.0" dependencies = [ + "ark-bn254", "ark-ec", "ark-ff", "ark-poly", @@ -1806,7 +1979,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", "version_check", @@ -1818,7 +1991,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "version_check", ] @@ -1840,9 +2013,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.64" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ "unicode-ident", ] @@ -1917,7 +2090,7 @@ version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", ] [[package]] @@ -2007,25 +2180,25 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.1" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ "aho-corasick", "memchr", "regex-automata", - "regex-syntax 0.7.3", + "regex-syntax 0.8.2", ] [[package]] name = "regex-automata" -version = "0.3.2" +version = "0.4.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.3", + "regex-syntax 0.8.2", ] [[package]] @@ -2036,9 +2209,15 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.3" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" + +[[package]] +name = "rle-decode-fast" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" [[package]] name = "rmp" @@ -2210,9 +2389,9 @@ version = "1.0.171" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", - "syn 2.0.25", + "syn 2.0.38", ] [[package]] @@ -2243,7 +2422,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling", - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", ] @@ -2324,7 +2503,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck", - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "rustversion", "syn 1.0.109", @@ -2381,18 +2560,18 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.25" +version = "2.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e3fc8c0c74267e2df136e5e5fb656a464158aa57624053375eb9c8c6e25ae2" +checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "unicode-ident", ] @@ -2403,7 +2582,7 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", "syn 1.0.109", "unicode-xid 0.2.4", @@ -2512,9 +2691,9 @@ version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "463fe12d7993d3b327787537ce8dd4dfa058de32fc2b195ef3cde03dc4771e8f" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", - "syn 2.0.25", + "syn 2.0.38", ] [[package]] @@ -2663,6 +2842,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + [[package]] name = "vec_map" version = "0.8.2" @@ -2719,9 +2904,9 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", - "syn 2.0.25", + "syn 2.0.38", "wasm-bindgen-shared", ] @@ -2741,9 +2926,9 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ - 
"proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", - "syn 2.0.25", + "syn 2.0.38", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -2910,6 +3095,26 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "zerocopy" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8db0ac2df3d060f81ec0380ccc5b71c2a7c092cfced671feeee1320e95559c87" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b6093bc6d5265ff40b479c834cdd25d8e20784781a2a29a8106327393d0a9ff" +dependencies = [ + "proc-macro2 1.0.69", + "quote 1.0.29", + "syn 2.0.38", +] + [[package]] name = "zeroize" version = "1.6.0" @@ -2925,7 +3130,7 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2 1.0.69", "quote 1.0.29", - "syn 2.0.25", + "syn 2.0.38", ] diff --git a/Cargo.toml b/Cargo.toml index 473e37d933..851713bf6b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,11 +6,11 @@ members = [ "groupmap", "hasher", "kimchi", + "optimism", "poseidon", "poseidon/export_test_vectors", "poly-commitment", "signer", - "tools/kimchi-asm", "tools/kimchi-visu", "utils", "internal-tracing", diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 95d99267d6..aeccb68783 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -58,17 +58,6 @@ - [Permutation](./kimchi/permut.md) - [Lookup](./kimchi/lookup.md) -# Snarky - -- [Overview](./snarky/overview.md) -- [API](./snarky/api.md) -- [snarky wrapper](./snarky/snarky-wrapper.md) -- [Kimchi backend](./snarky/kimchi-backend.md) -- [Vars](./snarky/vars.md) -- [Booleans](./snarky/booleans.md) -- [Circuit generation](./snarky/circuit-generation.md) -- [Witness generation](./snarky/witness-generation.md) - # Pickles & 
Inductive Proof Systems - [Overview](./fundamentals/zkbook_ips.md) diff --git a/book/src/snarky/api.md b/book/src/snarky/api.md deleted file mode 100644 index e8b981a474..0000000000 --- a/book/src/snarky/api.md +++ /dev/null @@ -1,2 +0,0 @@ -# API of Snarky - diff --git a/book/src/snarky/booleans.md b/book/src/snarky/booleans.md deleted file mode 100644 index 7b503f0580..0000000000 --- a/book/src/snarky/booleans.md +++ /dev/null @@ -1,73 +0,0 @@ -# Booleans - -Booleans are a good example of a [snarky variable](./vars.md#snarky-vars). - -```rust -pub struct Boolean(CVar); - -impl SnarkyType for Boolean -where - F: PrimeField, -{ - type Auxiliary = (); - - type OutOfCircuit = bool; - - const SIZE_IN_FIELD_ELEMENTS: usize = 1; - - fn to_cvars(&self) -> (Vec>, Self::Auxiliary) { - (vec![self.0.clone()], ()) - } - - fn from_cvars_unsafe(cvars: Vec>, _aux: Self::Auxiliary) -> Self { - assert_eq!(cvars.len(), Self::SIZE_IN_FIELD_ELEMENTS); - Self(cvars[0].clone()) - } - - fn check(&self, cs: &mut RunState) { - // TODO: annotation? - cs.assert_(Some("boolean check"), vec![BasicSnarkyConstraint::Boolean(self.0.clone())]); - } - - fn deserialize(&self) -> (Self::OutOfCircuit, Self::Auxiliary) { - todo!() - } - - fn serialize(out_of_circuit: Self::OutOfCircuit, aux: Self::Auxiliary) -> Self { - todo!() - } - - fn constraint_system_auxiliary() -> Self::Auxiliary { - todo!() - } - - fn value_to_field_elements(x: &Self::OutOfCircuit) -> (Vec, Self::Auxiliary) { - todo!() - } - - fn value_of_field_elements(x: (Vec, Self::Auxiliary)) -> Self::OutOfCircuit { - todo!() - } -} -``` - -## Check - -The `check()` function is simply constraining the `CVar` $x$ to be either $0$ or $1$ using the following constraint: - -$$x ( x - 1) = 0$$ - -It is trivial to use the [double generic gate](../specs/kimchi.md#double-generic-gate) for this. 
- -## And - -$$x \land y = x \times y$$ - -## Not - -$$\sim x = 1 - x$$ - -## Or - -* $\sim x \land \sim y = b$ -* $x \lor y = \sim b$ diff --git a/book/src/snarky/circuit-generation.md b/book/src/snarky/circuit-generation.md deleted file mode 100644 index e81793aa03..0000000000 --- a/book/src/snarky/circuit-generation.md +++ /dev/null @@ -1,29 +0,0 @@ -# Circuit generation - -In circuit generation mode, the `has_witness` field of `RunState` is set to the default `CircuitGeneration`, and the program of the user is ran to completion. - -During the execution, the different snarky functions called on `RunState` will create [internal variables](./vars.md) as well as constraints. - -## Creation of variables - -[Variables](./vars.md) can be created via the `compute()` function, which takes two arguments: - -* A `TypeCreation` toggle, which is either set to `Checked` or `Unsafe`. We will describe this below. -* A closure representing the actual computation behind the variable. This computation will only take place when real values are computed, and can be non-deterministic (e.g. random, or external values provided by the user). Note that the closure takes one argument: a `WitnessGeneration`, a structure that allows you to read the runtime values of any variables that were previously created in your program. - -The `compute()` function also needs a type hint to understand what type of [snarky type](./vars.md#snarky-vars) it is creating. 
- -It then performs the following steps: - -* creates enough [`CVar`](./vars#circuit-vars) to hold the value to be created -* retrieves the auxiliary data needed to create the snarky type (TODO: explain auxiliary data) and create the [`snarky variable`](./vars.md#snarky-vars) out of the `CVar`s and the auxiliary data -* if the `TypeCreation` is set to `Checked`, call the `check()` function on the snarky type (which will constrain the value created), if it is set to `Unsafe` do nothing (in which case we're trusting that the value cannot be malformed, this is mostly used internally and it is highly-likely that users directly making use of `Unsafe` are writing bugs) - -```admonish -At this point we only created variables to hold future values, and made sure that they are constrained. -The actual values will fill the room created by the `CVar` only during the [witness generation](./witness-generation.md). -``` - -## Constraints - -All other functions exposed by the API are basically here to operate on variables and create constraints in doing so. diff --git a/book/src/snarky/kimchi-backend.md b/book/src/snarky/kimchi-backend.md deleted file mode 100644 index 2d2ebf789a..0000000000 --- a/book/src/snarky/kimchi-backend.md +++ /dev/null @@ -1,234 +0,0 @@ -# Kimchi Backend - -![](https://i.imgur.com/KmKU5Pl.jpg) - -Underneath the snarky wrapper (in `snarky/checked_runner.rs`) lies what we used to call the `plonk_constraint_system` or `kimchi_backend` in `snarky/constraint_systen.rs`. - -```admonish -It is good to note that we're planning on removing this abstract separation between the snarky wrapper and the constraint system. -``` - -The logic in the kimchi backend serves two purposes: - -* **Circuit generation**. It is the logic that adds gates to our list of gates (representing the circuit). 
For most of these gates, the variables used are passed to the backend by the snarky wrapper, but some of them are created by the backend itself (see more in the [variables section](#variables)). -* **Witness generation**. It is the logic that creates the witness - -One can also perform two additional operations once the constraint system has been compiled: - -* Generate the prover and verifier index for the system. -* Get a hash of the constraint system (this includes the circuit, the number of public input) (TODO: verify that this is true) (TODO: what else should be in that hash? a version of snarky and a version of kimchi?). - -## A circuit - -A circuit is either being built, or has been contructed during a circuit generation phase: - -```rust -enum Circuit -where - F: PrimeField, -{ - /** A circuit still being written. */ - Unfinalized(Vec>), - /** Once finalized, a circuit is represented as a digest - and a list of gates that corresponds to the circuit. - */ - Compiled([u8; 32], Vec>), -} -``` - -## State - -The state of the kimchi backend looks like this: - -```rust -where - Field: PrimeField, -{ - /// A counter used to track variables - /// (similar to the one in the snarky wrapper) - next_internal_var: usize, - - /// Instruction on how to compute each internal variable - /// (as a linear combination of other variables). - /// Used during witness generation. - internal_vars: HashMap, Option)>, - - /// The symbolic execution trace table. - /// Each cell is a variable that takes a value during witness generation. - /// (if not set, it will take the value 0). - rows: Vec>>, - - /// The circuit once compiled - gates: Circuit, - - /// The row to use the next time we add a constraint. - // TODO: I think we can delete this - next_row: usize, - - /// The size of the public input - /// (which fills the first rows of our constraint system. - public_input_size: Option, - - // omitted values... 
-} -``` - -## Variables - -In the backend, there's two types of variables: - -```rust -enum V { - /// An external variable - /// (generated by snarky, via [exists]). - External(usize), - - /// An internal variable is generated to hold an intermediate value, - /// (e.g. in reducing linear combinations to single PLONK positions). - Internal(InternalVar), -} -``` - -Internal variables are basically a `usize` pointing to a hashmap in the state. - -That hashmap tells you how to compute the internal variable during witness generation: it is always a linear combination of other variables (and a constant). - -## Circuit generation - -During circuit generation, the snarky wrapper will make calls to the `add_constraint()` or `add_basic_snarky_constraint` function of the kimchi backend, specifying what gate to use and what variables to use in that gate. - -At this point, the snarky wrapper might have some variables that are not yet tracked as such (with a counter). -Rather, they are constants, or they are a combination of other variables. -You can see that as a small AST representing how to compute a variable. -(See the [variables section](./vars.md#circuit-vars) for more details). - -For this reason, they can hide a number of operations that haven't been constrained yet. -It is the role of the `add_constrain` logic to enforce that at this point constants, as well as linear combinations or scalings of variables, are encoded in the circuit. -This is done by adding enough generic gates (using the `reduce_lincom()` or `reduce_to_var()` functions). - -```admonish -This is a remnant of an optimization targetting R1CS (in which additions are for free). -An issue with this approach is the following: imagine that two circuit variables are created from the same circuit variable, imagine also that the original circuit variable contained a long AST, then both variables might end up creating the same constraints to convert that AST. 
-Currently, snarkyjs and pickles expose a `seal()` function that allows you to reduce this issue, at the cost of some manual work and mental tracking on the developer. -We should probably get rid of this, while making sure that we can continue to optimize generic gates -(in some cases you can merge two generic gates in one (TODO: give an example of where that can happen)). -Another solution is to keep track of what was reduced, and reuse previous reductions (similar to how we handle constants). -``` - -It is during this "reducing" step that internal variables (known only to the kimchi backend) are created. - -```admonish -The process is quite safe, as the kimchi backend cannot use the snarky wrapper variables directly (which are of type `CVar`). -Since the expected format (see the [variables section](#variables) is a number (of type `usize`), the only way to convert a non-tracked variable (constant, or scale, or linear combination) is to reduce it (and in the process constraining its value). -``` - -Depending on the gate being used, several constraints might be added via the `add_row()` function which does three things: - -1. figure out if there's any wiring to be done -2. add a gate to our list of gates (representing the circuit) -3. add the variables to our _symbolic_ execution trace table (symbolic in the sense that nothing has values yet) - -This process happens as the circuit is "parsed" and the constraint functions of the kimchi backend are called. - -This does not lead to a finalized circuit, see the next section to see how that is done. - -(TODO: ideally this should happen in the same step) - -## Finalization of the circuit. - -So far we've only talked about adding specific constraints to the circuit, but not about how public input are handled. - -The `finalization()` function of the kimchi backend does the following: - -* add as many generic rows as there are public inputs. 
-* construct the permutation -* computes a cache of the circuit (TODO: this is so unecessary) -* and other things that are not that important - -## Witness generation - -Witness generation happens by taking the finalized state (in the `compute_witness()` function) with a callback that can be used to retrieve the values of external variables (public input and public output). - -The algorithm follows these steps using the symbolic execution table we built during circuit generation: - -1. it initializes the execution trace table with zeros -2. go through the rows related to the public input and set the most-left column values to the ones obtained by the callback. -3. go through the other rows and compute the value of the variables left in the table - -Variables in step 3. should either: - -* be absent (`None`) and evaluated to the default value 0 -* point to an external variable, in which case the closure passed can be used to retrieve the value -* be an internal variable, in which case the value is computed by evaluating the AST that was used to create it. - -## Permutation - -The permutation is used to wire cells of the execution trace table (specifically, cells belonging to the first 7 columns). -It is also known as "copy constraints". - -```admonish -In snarky, the permutation is represented differently from kimchi, and thus needs to be converted to the kimchi's format before a proof can be created. -TODO: merge the representations -``` - -We use the permutation in ingenious ways to optimize circuits. -For example, we use it to encode each constants once, and wire it to places where it is used. -Another example, is that we use it to assert equality between two cells. 
- -## Implementation details - -There's two aspect of the implementation of the permutation, the first one is a hashmap of equivalence classes, which is used to track all the positions of a variable, the second one is making use of a [union find]() data structure to link variables that are equivalent (we'll talk about that after). - -The two data structures are in the kimchi backend's state: - -```rust -pub struct SnarkyConstraintSystem -where - Field: PrimeField, -{ - equivalence_classes: HashMap>>, - union_finds: disjoint_set::DisjointSet, - // omitted fields... -} -``` - -### equivalence classes - -As said previously, during circuit generation a symbolic execution trace table is created. It should look a bit like this (if there were only 3 columns and 4 rows): - -| | 0 | 1 | 2 | -| :-: | :-: | :-: | :-:| -| 0 | v1 | v1 | | -| 1 | | v2 | | -| 2 | | v2 | | -| 3 | | | v1 | - -From that, it should be clear that all the cells containing the variable `v1` should be connected, -and all the cells containing the variable `v2` should be as well. - -The format that the permutation expects is a [cycle](https://en.wikipedia.org/wiki/Cyclic_permutation): a list of cells where each cell is linked to the next, the last one wrapping around and linking to the first one. - -For example, a cycle for the `v1` variable could be: - -``` -(0, 0) -> (0, 1) -(0, 1) -> (3, 2) -(3, 2) -> (0, 0) -``` - -During circuit generation, a hashmap (called `equivalence_classes`) is used to track all the positions (row and column) of each variable. - -During finalization, all the different cycles are created by looking at all the variables existing in the hashmap. - -### Union finds - -Sometimes, we know that two variables will have equivalent values due to an `assert_equal()` being called to link them. -Since we link two variables together, they need to be part of the same cycle, and as such we need to be able to detect that to construct correct cycles. 
- -To do this, we use a [union find]() data structure, which allows us to easily find the unions of equivalent variables. - -When an `assert_equal()` is called, we link the two variables together using the `union_finds` data structure. - -During finalization, when we create the cycles, we use the `union_finds` data structure to find the equivalent variables. -We then create a new equivalence classes hashmap to merge the keys (variables) that are in the same set. -This is done before using the equivalence classes hashmap to construct the cycles. diff --git a/book/src/snarky/overview.md b/book/src/snarky/overview.md deleted file mode 100644 index b67c1fa30b..0000000000 --- a/book/src/snarky/overview.md +++ /dev/null @@ -1,32 +0,0 @@ -# Snarky - -Snarky is a frontend to the [kimchi proof system](../kimchi/overview.md). - -It allows users to write circuits that can be proven using kimchi. - -This part of the Mina book documents both how to use snarky, and how its internals work. - -```admonish -Snarky was originally an OCaml library. It also is known as a typescript library: SnarkyJS. -This documentation talks about the Rust implementation, which one can refer to as snarky-rs (but we will just say snarky from now on). -``` - -## High-level design - -Snarky is divided into two parts: - -* **Circuit-generation**: which is also called the setup or compilation phase. It is when snarky turn code written using its library, to a circuit that kimchi can understand. This can later be used by kimchi to produce prover and verifier keys. -* **Witness-generation**: which is also called the proving, or runtime phase. It is when snarky executes the written program and records its state at various point in time to create an execution trace of the program (which we call witness here). This can later be used by kimchi, with a proving key, to produce a zero-knowledge proof. - -A snarky program is constructed using functions exposed by the library. 
-The API of snarky that one can use to design circuits can be split in three categories: - -* creation of snarky variables (via `compute()`) -* creation of constraints (via `assert` type-functions) -* manipulation of snarky variables (which can sometimes create constraints) - -Snarky itself is divided into three parts: - -* [The high-level API](./api.md) that you can find in `api.rs` and `traits.rs` -* [The snarky wrapper](./snarky-wrapper.md), which contains the logic for creating user variables and composed types (see the section on [Snarky vars](./vars.md#snarky-vars)). -* [The kimchi backend](./kimchi-backend.md), which contains the logic for constructing the circuit as well as the witness. diff --git a/book/src/snarky/snarky-wrapper.md b/book/src/snarky/snarky-wrapper.md deleted file mode 100644 index 725f7c35ec..0000000000 --- a/book/src/snarky/snarky-wrapper.md +++ /dev/null @@ -1,70 +0,0 @@ -# Snarky wrapper - -Snarky, as of today, is constructed as two parts: - -* a snarky wrapper, which is explained in this document -* a backend underneath that wrapper, explained in the [kimchi backend section](./kimchi-backend.md) - -```admonish -This separation exists for legacy reasons, and ideally we should merge the two into a single library. -``` - -The snarky wrapper mostly exists in `checked_runner.rs`, and has the following state: - -```rust -where - F: PrimeField, -{ - /// The constraint system used to build the circuit. - /// If not set, the constraint system is not built. - system: Option>, - - /// The public input of the circuit used in witness generation. - // TODO: can we merge public_input and private_input? - public_input: Vec, - - // TODO: we could also just store `usize` here - pub(crate) public_output: Vec>, - - /// The private input of the circuit used in witness generation. Still not sure what that is, or why we care about this. - private_input: Vec, - - /// If set, the witness generation will check if the constraints are satisfied. 
- /// This is useful to simulate running the circuit and return an error if an assertion fails. - eval_constraints: bool, - - /// The number of public inputs. - num_public_inputs: usize, - - /// A counter used to track variables (this includes public inputs) as they're being created. - next_var: usize, - - /// Indication that we're running the witness generation (as opposed to the circuit creation). - mode: Mode, -} -``` - -The wrapper is designed to be used in different ways, depending on the fields set. - -```admonish -Ideally, we would like to only run this once and obtain a result that's an immutable compiled artifact. -Currently, `public_input`, `private_input`, `eval_constriants`, `next_var`, and `mode` all need to be mutable. -In the future these should be passed as arguments to functions, and should not exist in the state. -``` - -## Public output - -The support for public output is implemented as kind of a hack. - -When the developer writes a circuit, they have to specify the type of the public output. - -This allows the API to save enough room at the end of the public input, and store the variables used in the public output in the state. - -When the API calls the circuit written by the developer, it expects the public output (as a snarky type) to be returned by the function. -The compilation or proving API that ends up calling that function, can thus obtain the variables of the public output. -With that in hand, the API can continue to write the circuit to enforce an equality constraint between these variables being returned and the public output variable that it had previously stored in the state. - -Essentially, the kimchi backend will turn this into as many wiring as there are `CVar` in the public output. - -During witness generation, we need a way to modify the witness once we know the values of the public output. 
-As the public output `CVar`s were generated from the snarky wrapper (and not from the kimchi backend), the snarky wrapper should know their values after running the given circuit. diff --git a/book/src/snarky/vars.md b/book/src/snarky/vars.md deleted file mode 100644 index 7a1e3a3be7..0000000000 --- a/book/src/snarky/vars.md +++ /dev/null @@ -1,135 +0,0 @@ -# Vars - -In this section we will introduce two types of variables: - -* Circuit vars, or `CVar`s, which are low-level variables representing field elements. -* Snarky vars, which are high-level variables that user can use to create more meaningful programs. - -## Circuit vars - -In snarky, we first define circuit variables (TODO: rename Field variable?) which represent field elements in a circuit. -These circuit variables, or cvars, can be represented differently in the system: - -```rust -pub enum CVar -where - F: PrimeField, -{ - /// A constant. - Constant(F), - - /// A variable that can be refered to via a `usize`. - Var(usize), - - /// The addition of two other [CVar]s. - Add(Box>, Box>), - - /// Scaling of a [CVar]. - Scale(F, Box>), -} -``` - -One can see a CVar as an AST, where two atoms exist: a `Var(usize)` which represents a private input, an a `Constant(F)` which represents a constant. -Anything else represents combinations of these two atoms. - -### Constants - -Note that a circuit variable does not represent a value that has been constrained in the circuit (yet). -This is why we need to know if a cvar is a constant, so that we can avoid constraining it too early. 
-For example, the following code does not encode 2 or 1 in the circuit, but will encode 3: - -```rust -let x: CVar = state.exists(|_| 2) + state.exists(|_| 3); -state.assert_eq(x, y); // 3 and y will be encoded in the circuit -``` - -whereas the following code will encode all variables: - -```rust -let x = y + y; -let one: CVar = state.exists(|_| 1); -assert_eq(x, one); -``` - -### Non-constants - -Right after being created, a `CVar` is not constrained yet, and needs to be constrained by the application. -That is unless the application wants the `CVar` to be a constant that will not need to be constrained (see previous example) or because the application wants the `CVar` to be a random value (unlikely) (TODO: we should add a "rand" function for that). - -In any case, a circuit variable which is not a constant has a value that is not known yet at circuit-generation time. -In some situations, we might not want to constrain the - - -### When do variables get constrained? - -In general, a circuit variable only gets constrained by an assertion call like `assert` or `assert_equals`. - -When variables are added together, or scaled, they do not directly get constrained. -This is due to optimizations targetting R1CS (which we don't support anymore) that were implemented in the original snarky library, and that we have kept in snarky-rs. - -Imagine the following example: - -```rust -let y = x1 + x2 + x3 +.... ; -let z = y + 3; -assert_eq(y, 6); -assert_eq(z, 7); -``` - -The first two lines will not create constraints, but simply create minimal ASTs that track all of the additions. - -Both assert calls will then reduce the variables to a single circuit variable, creating the same constraints twice. - -For this reason, there's a function `seal()` defined in pickles and snarkyjs. (TODO: more about `seal()`, and why is it not in snarky?) 
(TODO: remove the R1CS optimization) - -## Snarky vars - -Handling `CVar`s can be cumbersome, as they can only represent a single field element. -We might want to represent values that are either in a smaller range (e.g. [booleans](./booleans.md)) or that are made out of several `CVar`s. - -For this, snarky's API exposes the following trait, which allows users to define their own types: - -```rust -pub trait SnarkyType: Sized -where - F: PrimeField, -{ - /// ? - type Auxiliary; - - /// The equivalent type outside of the circuit. - type OutOfCircuit; - - const SIZE_IN_FIELD_ELEMENTS: usize; - - fn to_cvars(&self) -> (Vec>, Self::Auxiliary); - - fn from_cvars_unsafe(cvars: Vec>, aux: Self::Auxiliary) -> Self; - - fn check(&self, cs: &mut RunState); - - fn deserialize(&self) -> (Self::OutOfCircuit, Self::Auxiliary); - - fn serialize(out_of_circuit: Self::OutOfCircuit, aux: Self::Auxiliary) -> Self; - - fn constraint_system_auxiliary() -> Self::Auxiliary; - - fn value_to_field_elements(x: &Self::OutOfCircuit) -> (Vec, Self::Auxiliary); - - fn value_of_field_elements(x: (Vec, Self::Auxiliary)) -> Self::OutOfCircuit; -} -``` - -Such types are always handled as `OutOfCircuit` types (e.g. `bool`) by the users, and as a type implementing `SnarkyType` by snarky (e.g. [`Boolean`](./booleans.md)). -Thus, the user can pass them to snarky in two ways: - -**As public inputs**. In this case they will be serialized into field elements for snarky before [witness-generation](./witness-generation.md) (via the `value_to_field_elements()` function) - -**As private inputs**. In this case, they must be created using the `compute()` function with a closure returning an `OutOfCircuit` value by the user. -The call to `compute()` will need to have some type hint, for snarky to understand what `SnarkyType` it is creating. -This is because the relationship is currently only one-way: a `SnarkyType` knows what out-of-circuit type it relates to, but not the other way is not true. 
-(TODO: should we implement that though?) - -A `SnarkyType` always implements a `check()` function, which is called by snarky when `compute()` is called to create such a type. -The `check()` function is responsible for creating the constraints that sanitize the newly-created `SnarkyType` (and its underlying `CVar`s). -For example, creating a boolean would make sure that the underlying `CVar` is either 0 or 1. diff --git a/book/src/snarky/witness-generation.md b/book/src/snarky/witness-generation.md deleted file mode 100644 index 41fbc3b5f1..0000000000 --- a/book/src/snarky/witness-generation.md +++ /dev/null @@ -1,21 +0,0 @@ -# Witness generation - -In snarky, currently, the same code is run through again to generate the witness. - -That is, the `RunState` contains a few changes: - -* **`public_input: Vec`**: now contains concrete values (instead of being empty). -* **`has_witness`**: is set to `WitnessGeneration`. - -Additionaly, if we want to verify that the arguments are actually correct (and that the program implemented does not fail) we can also set `eval_constraints` to `true` (defaults to `false`) to verify that the program has a correct state at all point in time. - -If we do not do this, the user will only detect failure during proof generation (specifically when the [composition polynomial](../specs/kimchi.md#proof-creation) is divided by the [vanishing polynomial](../specs/kimchi.md#proof-creation)). - -```admonish -This is implemented by simply checking that each [generic gate](../specs/kimchi.md#double-generic-gate) encountered is correct, in relation to the witness values observed in that row. -In other words $c_0 l + c_1 r + c_2 o + c_3 l r + c_4 = 0$ (extrapolated to the [double generic gate](../specs/kimchi.md#double-generic-gate)). -Note that other custom gates are not checked, as they are wrapped by [gadgets](../specs/kimchi.md#gates) which fill in witness values instead of the user. -Thus there is no room for user error (i.e. 
the user entering a wrong private input). -``` - -Due to the `has_witness` variable set to `WitnessGeneration`, functions will behave differently and compute actual values instead of generating constraints. diff --git a/book/src/specs/kimchi.md b/book/src/specs/kimchi.md index e7f03ab2ff..cffb7b02aa 100644 --- a/book/src/specs/kimchi.md +++ b/book/src/specs/kimchi.md @@ -310,10 +310,11 @@ z_2 = &\ (w_0(g^i) + \sigma_0 \cdot beta + \gamma) \cdot \\ \end{align} $$ -If computed correctly, we should have $z(g^{n-3}) = 1$. +We randomize the evaluations at `n - zk_rows + 1` and `n - zk_rows + 2` in order to add +zero-knowledge to the protocol. + +For a valid witness, we then have $z(g^{n-zk_rows}) = 1$. -Finally, randomize the last `EVAL_POINTS` evaluations $z(g^{n-2})$ and $z(g^{n-1})$, -in order to add zero-knowledge to the protocol. ### Lookup @@ -1607,11 +1608,34 @@ def sample(domain, i): The compilation steps to create the common index are as follow: 1. If the circuit is less than 2 gates, abort. -2. Create a domain for the circuit. That is, +1. Compute the number of zero-knowledge rows (`zk_rows`) that will be required to + achieve zero-knowledge. The following constraints apply to `zk_rows`: + * The number of chunks `c` results in an evaluation at `zeta` and `zeta * omega` in + each column for `2*c` evaluations per column, so `zk_rows >= 2*c + 1`. + * The permutation argument interacts with the `c` chunks in parallel, so it is + possible to cross-correlate between them to compromise zero knowledge. We know + that there is some `c >= 1` such that `zk_rows = 2*c + k` from the above. 
Thus, + attempting to find the evaluation at a new point, we find that: + * the evaluation of every witness column in the permutation contains `k` unknowns; + * the evaluations of the permutation argument aggregation has `k-1` unknowns; + * the permutation argument applies on all but `zk_rows - 3` rows; + * and thus we form the equation `zk_rows - 3 < 7 * k + (k - 1)` to ensure that we + can construct fewer equations than we have unknowns. + + This simplifies to `k > (2 * c - 2) / 7`, giving `zk_rows > (16 * c - 2) / 7`. + We can derive `c` from the `max_poly_size` supported by the URS, and thus we find + `zk_rows` and `domain_size` satisfying the fixpoint + + ```text + zk_rows = (16 * (domain_size / max_poly_size) + 5) / 7 + domain_size = circuit_size + zk_rows + ``` + +1. Create a domain for the circuit. That is, compute the smallest subgroup of the field that - has order greater or equal to `n + ZK_ROWS` elements. -3. Pad the circuit: add zero gates to reach the domain size. -4. sample the `PERMUTS` shifts. + has order greater or equal to `n + zk_rows` elements. +1. Pad the circuit: add zero gates to reach the domain size. +1. sample the `PERMUTS` shifts. ### Lookup Index @@ -1689,14 +1713,18 @@ Both the prover and the verifier index, besides the common parts described above These pre-computations are optimizations, in the context of normal proofs, but they are necessary for recursion. ```rs -pub struct ProverIndex { +pub struct ProverIndex< + G: KimchiCurve, + OpeningProof: OpenProof, + const COLUMNS: usize = KIMCHI_COLS, +> { /// constraints system polynomials #[serde(bound = "ConstraintSystem: Serialize + DeserializeOwned")] pub cs: ConstraintSystem, /// The symbolic linearization of our circuit, which can compile to concrete types once certain values are learned in the protocol. 
#[serde(skip)] - pub linearization: Linearization>>, + pub linearization: Linearization>, Column>, /// The mapping between powers of alpha and constraints #[serde(skip)] @@ -1704,17 +1732,18 @@ pub struct ProverIndex { /// polynomial commitment keys #[serde(skip)] - pub srs: Arc>, + #[serde(bound(deserialize = "OpeningProof::SRS: Default"))] + pub srs: Arc, /// maximal size of polynomial section pub max_poly_size: usize, - #[serde(bound = "ColumnEvaluations: Serialize + DeserializeOwned")] - pub column_evaluations: ColumnEvaluations, + #[serde(bound = "ColumnEvaluations: Serialize + DeserializeOwned")] + pub column_evaluations: ColumnEvaluations, /// The verifier index corresponding to this prover index #[serde(skip)] - pub verifier_index: Option>, + pub verifier_index: Option>, /// The verifier index digest corresponding to this prover index #[serde_as(as = "Option")] @@ -1752,15 +1781,22 @@ pub struct LookupVerifierIndex { #[serde_as] #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct VerifierIndex { +pub struct VerifierIndex< + G: KimchiCurve, + OpeningProof: OpenProof, + const COLUMNS: usize = KIMCHI_COLS, +> { /// evaluation domain #[serde_as(as = "o1_utils::serialization::SerdeAs")] pub domain: D, /// maximal size of polynomial section pub max_poly_size: usize, + /// the number of randomized rows to achieve zero knowledge + pub zk_rows: u64, /// polynomial commitment keys #[serde(skip)] - pub srs: OnceCell>>, + #[serde(bound(deserialize = "OpeningProof::SRS: Default"))] + pub srs: Arc, /// number of public inputs pub public: usize, /// number of previous evaluation challenges, for recursive proving @@ -1771,8 +1807,8 @@ pub struct VerifierIndex { #[serde(bound = "PolyComm: Serialize + DeserializeOwned")] pub sigma_comm: [PolyComm; PERMUTS], /// coefficient commitment array - #[serde(bound = "PolyComm: Serialize + DeserializeOwned")] - pub coefficients_comm: Vec>, + #[serde_as(as = "[_; COLUMNS]")] + pub coefficients_comm: [PolyComm; COLUMNS], /// 
coefficient commitment array #[serde(bound = "PolyComm: Serialize + DeserializeOwned")] pub generic_comm: PolyComm, @@ -1833,7 +1869,7 @@ pub struct VerifierIndex { pub shift: [G::ScalarField; PERMUTS], /// zero-knowledge polynomial #[serde(skip)] - pub zkpm: OnceCell>, + pub permutation_vanishing_polynomial_m: OnceCell>, // TODO(mimoo): isn't this redundant with domain.d1.group_gen ? /// domain offset for zero-knowledge #[serde(skip)] @@ -1846,7 +1882,7 @@ pub struct VerifierIndex { pub lookup_index: Option>, #[serde(skip)] - pub linearization: Linearization>>, + pub linearization: Linearization>, Column>, /// The mapping between powers of alpha and constraints #[serde(skip)] pub powers_of_alpha: Alphas, @@ -1947,44 +1983,84 @@ pub struct PointEvaluations { pub zeta_omega: Evals, } -/// Evaluations of lookup polynomials -#[serde_as] -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct LookupEvaluations { - /// sorted lookup table polynomial - pub sorted: Vec, - /// lookup aggregation polynomial - pub aggreg: Evals, - // TODO: May be possible to optimize this away? - /// lookup table polynomial - pub table: Evals, - - /// Optionally, a runtime table polynomial. - pub runtime: Option, -} - // TODO: this should really be vectors here, perhaps create another type for chunked evaluations? /// Polynomial evaluations contained in a `ProverProof`. 
/// - **Chunked evaluations** `Field` is instantiated with vectors with a length that equals the length of the chunk /// - **Non chunked evaluations** `Field` is instantiated with a field, so they are single-sized#[serde_as] #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ProofEvaluations { +pub struct ProofEvaluations { + /// public input polynomials + pub public: Option, /// witness polynomials - pub w: Vec, + #[serde_as(as = "[_; COLUMNS]")] + pub w: [Evals; COLUMNS], /// permutation polynomial pub z: Evals, /// permutation polynomials /// (PERMUTS-1 evaluations because the last permutation is only used in commitment form) pub s: [Evals; PERMUTS - 1], /// coefficient polynomials - pub coefficients: Vec, - /// lookup-related evaluations - pub lookup: Option>, + #[serde_as(as = "[_; COLUMNS]")] + pub coefficients: [Evals; COLUMNS], /// evaluation of the generic selector polynomial pub generic_selector: Evals, /// evaluation of the poseidon selector polynomial pub poseidon_selector: Evals, + /// evaluation of the elliptic curve addition selector polynomial + pub complete_add_selector: Evals, + /// evaluation of the elliptic curve variable base scalar multiplication selector polynomial + pub mul_selector: Evals, + /// evaluation of the endoscalar multiplication selector polynomial + pub emul_selector: Evals, + /// evaluation of the endoscalar multiplication scalar computation selector polynomial + pub endomul_scalar_selector: Evals, + + // Optional gates + /// evaluation of the RangeCheck0 selector polynomial + pub range_check0_selector: Option, + /// evaluation of the RangeCheck1 selector polynomial + pub range_check1_selector: Option, + /// evaluation of the ForeignFieldAdd selector polynomial + pub foreign_field_add_selector: Option, + /// evaluation of the ForeignFieldMul selector polynomial + pub foreign_field_mul_selector: Option, + /// evaluation of the Xor selector polynomial + pub xor_selector: Option, + /// evaluation of the Rot 
selector polynomial + pub rot_selector: Option, + /// evaluation of the KeccakRound selector polynomial + pub keccak_round_selector: Option, + /// evaluation of the KeccakRound selector polynomial + pub keccak_sponge_selector: Option, + + // lookup-related evaluations + /// evaluation of lookup aggregation polynomial + pub lookup_aggregation: Option, + /// evaluation of lookup table polynomial + pub lookup_table: Option, + /// evaluation of lookup sorted polynomials + pub lookup_sorted: [Option; 5], + /// evaluation of runtime lookup table polynomial + pub runtime_lookup_table: Option, + + // lookup selectors + /// evaluation of the runtime lookup table selector polynomial + pub runtime_lookup_table_selector: Option, + /// evaluation of the Xor range check pattern selector polynomial + pub xor_lookup_selector: Option, + /// evaluation of the Lookup range check pattern selector polynomial + pub lookup_gate_lookup_selector: Option, + /// evaluation of the RangeCheck range check pattern selector polynomial + pub range_check_lookup_selector: Option, + /// evaluation of the ForeignFieldMul range check pattern selector polynomial + pub foreign_field_mul_lookup_selector: Option, + /* + /// evaluation of the KeccakRound pattern selector polynomial + pub keccak_round_lookup_selector: Option, + /// evaluation of the KeccakSponge pattern selector polynomial + pub keccak_sponge_lookup_selector: Option, + */ } /// Commitments linked to the lookup feature @@ -2004,7 +2080,7 @@ pub struct LookupCommitments { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct ProverCommitments { +pub struct ProverCommitments { /// The commitments to the witness (execution trace) pub w_comm: Vec>, /// The commitment to the permutation polynomial @@ -2019,15 +2095,19 @@ pub struct ProverCommitments { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: 
ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct ProverProof { +pub struct ProverProof { /// All the polynomial commitments required in the proof - pub commitments: ProverCommitments, + pub commitments: ProverCommitments, /// batched commitment opening proof - pub proof: OpeningProof, + #[serde(bound( + serialize = "OpeningProof: Serialize", + deserialize = "OpeningProof: Deserialize<'de>" + ))] + pub proof: OpeningProof, /// Two evaluations over a number of committed polynomials - pub evals: ProofEvaluations>>, + pub evals: ProofEvaluations>, COLUMNS>, /// Required evaluation for [Maller's optimization](https://o1-labs.github.io/mina-book/crypto/plonk/maller_15.html#the-evaluation-of-l) #[serde_as(as = "o1_utils::serialization::SerdeAs")] @@ -2082,10 +2162,10 @@ The prover then follows the following steps to create the proof: 1. Ensure we have room in the witness for the zero-knowledge rows. We currently expect the witness not to be of the same length as the domain, but instead be of the length of the (smaller) circuit. - If we cannot add `ZK_ROWS` rows to the columns of the witness before reaching + If we cannot add `zk_rows` rows to the columns of the witness before reaching the size of the domain, abort. 1. Pad the witness columns with Zero gates to make them the same length as the domain. - Then, randomize the last `ZK_ROWS` of each columns. + Then, randomize the last `zk_rows` of each columns. 1. Setup the Fq-Sponge. 1. Absorb the digest of the VerifierIndex. 1. Absorb the commitments of the previous challenges with the Fq-sponge. @@ -2098,12 +2178,12 @@ The prover then follows the following steps to create the proof: Note: unlike the original PLONK protocol, the prover also provides evaluations of the public polynomial to help the verifier circuit. This is why we need to absorb the commitment to the public polynomial at this point. -1. Commit to the witness columns by creating `COLUMNS` hidding commitments. +1. 
Commit to the witness columns by creating `KIMCHI_COLS` hiding commitments. Note: since the witness is in evaluation form, we can use the `commit_evaluation` optimization. 1. Absorb the witness commitments with the Fq-Sponge. -1. Compute the witness polynomials by interpolating each `COLUMNS` of the witness. +1. Compute the witness polynomials by interpolating each `KIMCHI_COLS` of the witness. As mentioned above, we commit using the evaluations form rather than the coefficients form so we can take advantage of the sparsity of the evaluations (i.e., there are many 0 entries and entries that have less-than-full-size field elemnts.) @@ -2149,7 +2229,6 @@ The prover then follows the following steps to create the proof: and by then dividing the resulting polynomial with the vanishing polynomial $Z_H$. TODO: specify the split of the permutation polynomial into perm and bnd? 1. commit (hiding) to the quotient polynomial $t$ - TODO: specify the dummies 1. Absorb the the commitment of the quotient polynomial with the Fq-Sponge. 1. Sample $\zeta'$ with the Fq-Sponge. 1. Derive $\zeta$ from $\zeta'$ using the endomorphism (TODO: specify) @@ -2186,7 +2265,6 @@ The prover then follows the following steps to create the proof: 1. Squeeze the Fq-sponge and absorb the result with the Fr-Sponge. 1. Absorb the previous recursion challenges. 1. Compute evaluations for the previous recursion challenges. -1. Evaluate the negated public polynomial (if present) at $\zeta$ and $\zeta\omega$. 1. Absorb the unique evaluation of ft: $ft(\zeta\omega)$. 1. Absorb all the polynomial evaluations in $\zeta$ and $\zeta\omega$: * the public polynomial @@ -2210,12 +2288,14 @@ The prover then follows the following steps to create the proof: * the poseidon selector * the 15 registers/witness columns * the 6 sigmas + * the optional gates * optionally, the runtime table 1. 
if using lookup: * add the lookup sorted polynomials * add the lookup aggreg polynomial * add the combined table polynomial * if present, add the runtime table polynomial + * the lookup selectors 1. Create an aggregated evaluation proof for all of these polynomials at $\zeta$ and $\zeta\omega$ using $u$ and $v$. @@ -2248,7 +2328,7 @@ We run the following algorithm: 1. Absorb the commitment to the permutation trace with the Fq-Sponge. 1. Sample $\alpha'$ with the Fq-Sponge. 1. Derive $\alpha$ from $\alpha'$ using the endomorphism (TODO: details). -1. Enforce that the length of the $t$ commitment is of size `PERMUTS`. +1. Enforce that the length of the $t$ commitment is of size 7. 1. Absorb the commitment to the quotient polynomial $t$ into the argument. 1. Sample $\zeta'$ with the Fq-Sponge. 1. Derive $\zeta$ from $\zeta'$ using the endomorphism (TODO: specify). @@ -2306,6 +2386,7 @@ Essentially, this steps verifies that $f(\zeta) = t(\zeta) * Z_H(\zeta)$. * witness commitments * coefficient commitments * sigma commitments + * optional gate commitments * lookup commitments #### Batch verification of proofs diff --git a/kimchi/Cargo.toml b/kimchi/Cargo.toml index a5d63000e8..667b9c232c 100644 --- a/kimchi/Cargo.toml +++ b/kimchi/Cargo.toml @@ -18,6 +18,7 @@ ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] } ark-ec = { version = "0.3.0", features = [ "parallel" ] } ark-poly = { version = "0.3.0", features = [ "parallel" ] } ark-serialize = "0.3.0" +ark-bn254 = { version = "0.3.0", optional = true } blake2 = "0.10.0" num-bigint = { version = "0.4.3", features = ["rand", "serde"]} num-derive = "0.3" @@ -82,5 +83,6 @@ harness = false default = [] internal_tracing = [ "internal-tracing/enabled" ] ocaml_types = [ "ocaml", "ocaml-gen", "poly-commitment/ocaml_types", "mina-poseidon/ocaml_types", "internal-tracing/ocaml_types" ] +bn254 = [ "ark-bn254" ] wasm_types = [ "wasm-bindgen" ] check_feature_flags = [] diff --git a/kimchi/src/alphas.rs 
b/kimchi/src/alphas.rs index e41c494fea..f3fe7b6563 100644 --- a/kimchi/src/alphas.rs +++ b/kimchi/src/alphas.rs @@ -242,7 +242,7 @@ mod tests { use std::{fs, path::Path}; use super::*; - use crate::circuits::{gate::GateType, polynomial::COLUMNS}; + use crate::circuits::{gate::GateType, polynomial::KIMCHI_COLS}; use mina_curves::pasta::{Fp, Vesta}; // testing [Builder] @@ -322,9 +322,9 @@ mod tests { #[test] fn get_alphas_for_spec() { let gates = vec![CircuitGate::::zero(Wire::for_row(0)); 2]; - let index = new_index_for_test::(gates, 0); + let index = new_index_for_test::(gates, 0); let (_linearization, powers_of_alpha) = - expr_linearization::(Some(&index.cs.feature_flags), true); + expr_linearization::(Some(&index.cs.feature_flags), true); // make sure this is present in the specification let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap(); let spec_path = Path::new(&manifest_dir) diff --git a/kimchi/src/bench.rs b/kimchi/src/bench.rs index 46114ba7e4..97cb026a2e 100644 --- a/kimchi/src/bench.rs +++ b/kimchi/src/bench.rs @@ -7,13 +7,13 @@ use mina_poseidon::{ sponge::{DefaultFqSponge, DefaultFrSponge}, }; use o1_utils::math; -use poly_commitment::commitment::CommitmentCurve; +use poly_commitment::{commitment::CommitmentCurve, evaluation_proof::OpeningProof}; use crate::{ circuits::{ gate::CircuitGate, polynomials::generic::GenericGateSpec, - wires::{Wire, COLUMNS}, + wires::{Wire, KIMCHI_COLS}, }, proof::ProverProof, prover_index::{testing::new_index_for_test, ProverIndex}, @@ -28,8 +28,8 @@ type ScalarSponge = DefaultFrSponge; pub struct BenchmarkCtx { num_gates: usize, group_map: BWParameters, - index: ProverIndex, - verifier_index: VerifierIndex, + index: ProverIndex>, + verifier_index: VerifierIndex>, } impl BenchmarkCtx { @@ -77,9 +77,9 @@ impl BenchmarkCtx { } /// Produces a proof - pub fn create_proof(&self) -> (ProverProof, Vec) { + pub fn create_proof(&self) -> (ProverProof>, Vec) { // create witness - let witness: [Vec; COLUMNS] = 
array::from_fn(|_| vec![1u32.into(); self.num_gates]); + let witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![1u32.into(); self.num_gates]); let public_input = witness[0][0..self.index.cs.public].to_vec(); @@ -96,7 +96,8 @@ impl BenchmarkCtx { ) } - pub fn batch_verification(&self, batch: &[(ProverProof, Vec)]) { + #[allow(clippy::type_complexity)] + pub fn batch_verification(&self, batch: &[(ProverProof>, Vec)]) { // verify the proof let batch: Vec<_> = batch .iter() @@ -106,7 +107,11 @@ impl BenchmarkCtx { public_input: public, }) .collect(); - batch_verify::(&self.group_map, &batch).unwrap(); + batch_verify::, KIMCHI_COLS>( + &self.group_map, + &batch, + ) + .unwrap(); } } diff --git a/kimchi/src/circuits/argument.rs b/kimchi/src/circuits/argument.rs index 2f862bf1d7..d4bde64d82 100644 --- a/kimchi/src/circuits/argument.rs +++ b/kimchi/src/circuits/argument.rs @@ -13,6 +13,7 @@ use serde::{Deserialize, Serialize}; use super::{ expr::{constraints::ExprOps, Cache, ConstantExpr, Constants}, gate::{CurrOrNext, GateType}, + wires::KIMCHI_COLS, }; use CurrOrNext::{Curr, Next}; @@ -36,8 +37,8 @@ pub enum ArgumentType { /// created with ArgumentData and F = Field or F = PrimeField, then the constraints /// are built as expressions of real field elements and can be evaluated directly on /// the witness without using the prover. 
-pub struct ArgumentEnv { - data: Option>, +pub struct ArgumentEnv { + data: Option>, phantom_data: PhantomData, } @@ -51,10 +52,14 @@ impl Default for ArgumentEnv { } } -impl> ArgumentEnv { +impl, const COLUMNS: usize> ArgumentEnv { /// Initialize the environment for creating constraints of real field elements that can be /// evaluated directly over the witness without the prover/verifier - pub fn create(witness: ArgumentWitness, coeffs: Vec, constants: Constants) -> Self { + pub fn create( + witness: ArgumentWitness, + coeffs: Vec, + constants: Constants, + ) -> Self { ArgumentEnv { data: Some(ArgumentData { witness, @@ -129,9 +134,9 @@ impl> ArgumentEnv { } /// Argument environment data for constraints of field elements -pub struct ArgumentData { +pub struct ArgumentData { /// Witness rows - pub witness: ArgumentWitness, + pub witness: ArgumentWitness, /// Gate coefficients pub coeffs: Vec, /// Constants @@ -139,14 +144,14 @@ pub struct ArgumentData { } /// Witness data for a argument -pub struct ArgumentWitness { +pub struct ArgumentWitness { /// Witness for current row - pub curr: Vec, + pub curr: [T; COLUMNS], /// Witness for next row - pub next: Vec, + pub next: [T; COLUMNS], } -impl std::ops::Index<(CurrOrNext, usize)> for ArgumentWitness { +impl std::ops::Index<(CurrOrNext, usize)> for ArgumentWitness { type Output = T; fn index(&self, idx: (CurrOrNext, usize)) -> &T { @@ -168,7 +173,10 @@ pub trait Argument { const CONSTRAINTS: u32; /// Constraints for this argument - fn constraint_checks>(env: &ArgumentEnv, cache: &mut Cache) -> Vec; + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + cache: &mut Cache, + ) -> Vec; /// Returns the set of constraints required to prove this argument. 
fn constraints(cache: &mut Cache) -> Vec> { diff --git a/kimchi/src/circuits/berkeley_columns.rs b/kimchi/src/circuits/berkeley_columns.rs new file mode 100644 index 0000000000..8534221b3c --- /dev/null +++ b/kimchi/src/circuits/berkeley_columns.rs @@ -0,0 +1,162 @@ +use crate::{ + circuits::{ + expr::{self, ColumnEvaluations, Domain, ExprError, GenericColumn}, + gate::{CurrOrNext, GateType}, + lookup::lookups::LookupPattern, + }, + proof::{PointEvaluations, ProofEvaluations}, +}; +use serde::{Deserialize, Serialize}; +use CurrOrNext::{Curr, Next}; + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)] +/// A type representing one of the polynomials involved in the PLONK IOP. +pub enum Column { + Witness(usize), + Z, + LookupSorted(usize), + LookupAggreg, + LookupTable, + LookupKindIndex(LookupPattern), + LookupRuntimeSelector, + LookupRuntimeTable, + Index(GateType), + Coefficient(usize), + Permutation(usize), +} + +impl GenericColumn for Column { + fn domain(&self) -> Domain { + match self { + Column::Index(GateType::Generic) => Domain::D4, + Column::Index(GateType::CompleteAdd) => Domain::D4, + _ => Domain::D8, + } + } +} + +impl Column { + pub fn latex(&self) -> String { + match self { + Column::Witness(i) => format!("w_{{{i}}}"), + Column::Z => "Z".to_string(), + Column::LookupSorted(i) => format!("s_{{{i}}}"), + Column::LookupAggreg => "a".to_string(), + Column::LookupTable => "t".to_string(), + Column::LookupKindIndex(i) => format!("k_{{{i:?}}}"), + Column::LookupRuntimeSelector => "rts".to_string(), + Column::LookupRuntimeTable => "rt".to_string(), + Column::Index(gate) => { + format!("{gate:?}") + } + Column::Coefficient(i) => format!("c_{{{i}}}"), + Column::Permutation(i) => format!("sigma_{{{i}}}"), + } + } + + pub fn text(&self) -> String { + match self { + Column::Witness(i) => format!("w[{i}]"), + Column::Z => "Z".to_string(), + Column::LookupSorted(i) => format!("s[{i}]"), + Column::LookupAggreg => 
"a".to_string(), + Column::LookupTable => "t".to_string(), + Column::LookupKindIndex(i) => format!("k[{i:?}]"), + Column::LookupRuntimeSelector => "rts".to_string(), + Column::LookupRuntimeTable => "rt".to_string(), + Column::Index(gate) => { + format!("{gate:?}") + } + Column::Coefficient(i) => format!("c[{i}]"), + Column::Permutation(i) => format!("sigma_[{i}]"), + } + } +} + +impl expr::Variable { + pub fn ocaml(&self) -> String { + format!("var({:?}, {:?})", self.col, self.row) + } + + pub fn latex(&self) -> String { + let col = self.col.latex(); + match self.row { + Curr => col, + Next => format!("\\tilde{{{col}}}"), + } + } + + pub fn text(&self) -> String { + let col = self.col.text(); + match self.row { + Curr => format!("Curr({col})"), + Next => format!("Next({col})"), + } + } +} + +impl ColumnEvaluations + for ProofEvaluations, COLUMNS> +{ + type Column = Column; + fn evaluate(&self, col: Self::Column) -> Result, ExprError> { + use Column::*; + match col { + Witness(i) => Ok(self.w[i]), + Z => Ok(self.z), + LookupSorted(i) => self.lookup_sorted[i].ok_or(ExprError::MissingIndexEvaluation(col)), + LookupAggreg => self + .lookup_aggregation + .ok_or(ExprError::MissingIndexEvaluation(col)), + LookupTable => self + .lookup_table + .ok_or(ExprError::MissingIndexEvaluation(col)), + LookupRuntimeTable => self + .runtime_lookup_table + .ok_or(ExprError::MissingIndexEvaluation(col)), + Index(GateType::Poseidon) => Ok(self.poseidon_selector), + Index(GateType::Generic) => Ok(self.generic_selector), + Index(GateType::CompleteAdd) => Ok(self.complete_add_selector), + Index(GateType::VarBaseMul) => Ok(self.mul_selector), + Index(GateType::EndoMul) => Ok(self.emul_selector), + Index(GateType::EndoMulScalar) => Ok(self.endomul_scalar_selector), + Index(GateType::RangeCheck0) => self + .range_check0_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + Index(GateType::RangeCheck1) => self + .range_check1_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + 
Index(GateType::ForeignFieldAdd) => self + .foreign_field_add_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + Index(GateType::ForeignFieldMul) => self + .foreign_field_mul_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + Index(GateType::Xor16) => self + .xor_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + Index(GateType::Rot64) => self + .rot_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + Permutation(i) => Ok(self.s[i]), + Coefficient(i) => Ok(self.coefficients[i]), + LookupKindIndex(LookupPattern::Xor) => self + .xor_lookup_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + LookupKindIndex(LookupPattern::Lookup) => self + .lookup_gate_lookup_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + LookupKindIndex(LookupPattern::RangeCheck) => self + .range_check_lookup_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + LookupKindIndex(LookupPattern::ForeignFieldMul) => self + .foreign_field_mul_lookup_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + LookupRuntimeSelector => self + .runtime_lookup_table_selector + .ok_or(ExprError::MissingIndexEvaluation(col)), + Index(_) => Err(ExprError::MissingIndexEvaluation(col)), + //LookupKindIndex(_) => Err(ExprError::MissingIndexEvaluation(col)), + } + } +} diff --git a/kimchi/src/circuits/constraints.rs b/kimchi/src/circuits/constraints.rs index 68bf4bd6d8..efa75aaf5d 100644 --- a/kimchi/src/circuits/constraints.rs +++ b/kimchi/src/circuits/constraints.rs @@ -7,11 +7,11 @@ use crate::{ gate::{CircuitGate, GateType}, lookup::{index::LookupConstraintSystem, lookups::LookupFeatures, tables::LookupTable}, polynomial::{WitnessEvals, WitnessOverDomains, WitnessShifts}, - polynomials::permutation::{Shifts, ZK_ROWS}, + polynomials::permutation::Shifts, wires::*, }, curve::KimchiCurve, - error::SetupError, + error::{DomainCreationError, SetupError}, prover_index::ProverIndex, }; use ark_ff::{PrimeField, SquareRootField, Zero}; @@ -21,6 +21,7 @@ 
use ark_poly::{ }; use o1_utils::ExtendedEvaluations; use once_cell::sync::OnceCell; +use poly_commitment::OpenProof; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_with::serde_as; use std::array; @@ -54,14 +55,14 @@ pub struct FeatureFlags { /// The polynomials representing evaluated columns, in coefficient form. #[serde_as] #[derive(Clone, Serialize, Deserialize, Debug)] -pub struct EvaluatedColumnCoefficients { +pub struct EvaluatedColumnCoefficients { /// permutation coefficients #[serde_as(as = "[o1_utils::serialization::SerdeAs; PERMUTS]")] pub permutation_coefficients: [DP; PERMUTS], /// gate coefficients - #[serde_as(as = "[o1_utils::serialization::SerdeAs; W]")] - pub coefficients: [DP; W], + #[serde_as(as = "[o1_utils::serialization::SerdeAs; COLUMNS]")] + pub coefficients: [DP; COLUMNS], /// generic gate selector #[serde_as(as = "o1_utils::serialization::SerdeAs")] @@ -76,14 +77,14 @@ pub struct EvaluatedColumnCoefficients { /// The evaluations are expanded to the domain size required for their constraints. 
#[serde_as] #[derive(Clone, Serialize, Deserialize, Debug)] -pub struct ColumnEvaluations { +pub struct ColumnEvaluations { /// permutation coefficients over domain d8 #[serde_as(as = "[o1_utils::serialization::SerdeAs; PERMUTS]")] pub permutation_coefficients8: [E>; PERMUTS], /// coefficients over domain d8 - #[serde_as(as = "[o1_utils::serialization::SerdeAs; W]")] - pub coefficients8: [E>; W], + #[serde_as(as = "[o1_utils::serialization::SerdeAs; COLUMNS]")] + pub coefficients8: [E>; COLUMNS], /// generic selector over domain d4 #[serde_as(as = "o1_utils::serialization::SerdeAs")] @@ -158,6 +159,8 @@ pub struct ConstraintSystem { #[serde(bound = "CircuitGate: Serialize + DeserializeOwned")] pub gates: Vec>, + pub zk_rows: u64, + /// flags for optional features pub feature_flags: FeatureFlags, @@ -201,6 +204,7 @@ pub struct Builder { runtime_tables: Option>>, precomputations: Option>>, disable_gates_checks: bool, + max_poly_size: Option, } /// Create selector polynomial for a circuit gate @@ -258,12 +262,14 @@ impl ConstraintSystem { runtime_tables: None, precomputations: None, disable_gates_checks: false, + max_poly_size: None, } } pub fn precomputations(&self) -> &Arc> { - self.precomputations - .get_or_init(|| Arc::new(DomainConstantEvaluations::create(self.domain).unwrap())) + self.precomputations.get_or_init(|| { + Arc::new(DomainConstantEvaluations::create(self.domain, self.zk_rows).unwrap()) + }) } pub fn set_precomputations(&self, precomputations: Arc>) { @@ -273,17 +279,21 @@ impl ConstraintSystem { } } -impl> - ProverIndex +impl< + F: PrimeField + SquareRootField, + G: KimchiCurve, + OpeningProof: OpenProof, + const COLUMNS: usize, + > ProverIndex { /// This function verifies the consistency of the wire /// assignments (witness) against the constraints /// witness: wire assignment witness /// RETURN: verification status - pub fn verify(&self, witness: &[Vec; W], public: &[F]) -> Result<(), GateError> { + pub fn verify(&self, witness: &[Vec; COLUMNS], 
public: &[F]) -> Result<(), GateError> { // pad the witness let pad = vec![F::zero(); self.cs.domain.d1.size() - witness[0].len()]; - let witness: [Vec; W] = array::from_fn(|i| { + let witness: [Vec; COLUMNS] = array::from_fn(|i| { let mut w = witness[i].to_vec(); w.extend_from_slice(&pad); w @@ -319,7 +329,7 @@ impl(row, &witness, self, public) + gate.verify::(row, &witness, self, public) .map_err(|err| GateError::Custom { row, err })?; } @@ -330,13 +340,17 @@ impl ConstraintSystem { /// evaluate witness polynomials over domains - pub fn evaluate(&self, w: &[DP; W], z: &DP) -> WitnessOverDomains { + pub fn evaluate( + &self, + w: &[DP; COLUMNS], + z: &DP, + ) -> WitnessOverDomains { // compute shifted witness polynomials - let w8: [E>; W] = + let w8: [E>; COLUMNS] = array::from_fn(|i| w[i].evaluate_over_domain_by_ref(self.domain.d8)); let z8 = z.evaluate_over_domain_by_ref(self.domain.d8); - let w4: [E>; W] = array::from_fn(|i| { + let w4: [E>; COLUMNS] = array::from_fn(|i| { E::>::from_vec_and_domain( (0..self.domain.d4.size) .map(|j| w8[i].evals[2 * j as usize]) @@ -368,14 +382,15 @@ impl ConstraintSystem { } } - pub(crate) fn evaluated_column_coefficients( + pub(crate) fn evaluated_column_coefficients( &self, - ) -> EvaluatedColumnCoefficients { + ) -> EvaluatedColumnCoefficients { // compute permutation polynomials let shifts = Shifts::new(&self.domain.d1); - let mut sigmal1: [Vec; PERMUTS] = - array::from_fn(|_| vec![F::zero(); self.domain.d1.size()]); + let n = self.domain.d1.size(); + + let mut sigmal1: [Vec; PERMUTS] = array::from_fn(|_| vec![F::zero(); n]); for (row, gate) in self.gates.iter().enumerate() { for (cell, sigma) in gate.wires.iter().zip(sigmal1.iter_mut()) { @@ -383,6 +398,14 @@ impl ConstraintSystem { } } + // Zero out the sigmas in the zk rows, to ensure that the permutation aggregation is + // quasi-random for those rows. 
+ for row in n + 2 - (self.zk_rows as usize)..n - 1 { + for sigma in sigmal1.iter_mut() { + sigma[row] = F::zero(); + } + } + let sigmal1: [_; PERMUTS] = { let [s0, s1, s2, s3, s4, s5, s6] = sigmal1; [ @@ -423,7 +446,7 @@ impl ConstraintSystem { .interpolate(); // coefficient polynomial - let coefficients: [_; W] = array::from_fn(|i| { + let coefficients: [_; COLUMNS] = array::from_fn(|i| { let padded = self .gates .iter() @@ -441,10 +464,10 @@ impl ConstraintSystem { } } - pub(crate) fn column_evaluations( + pub(crate) fn column_evaluations( &self, - evaluated_column_coefficients: &EvaluatedColumnCoefficients, - ) -> ColumnEvaluations { + evaluated_column_coefficients: &EvaluatedColumnCoefficients, + ) -> ColumnEvaluations { let permutation_coefficients8 = array::from_fn(|i| { evaluated_column_coefficients.permutation_coefficients[i] .evaluate_over_domain_by_ref(self.domain.d8) @@ -634,6 +657,10 @@ impl ConstraintSystem { } } +pub fn zk_rows_strict_lower_bound(num_chunks: usize) -> usize { + (2 * (PERMUTS + 1) * num_chunks - 2) / PERMUTS +} + impl Builder { /// Set up the number of public inputs. /// If not invoked, it equals `0` by default. @@ -687,8 +714,13 @@ impl Builder { self } + pub fn max_poly_size(mut self, max_poly_size: Option) -> Self { + self.max_poly_size = max_poly_size; + self + } + /// Build the [ConstraintSystem] from a [Builder]. 
- pub fn build(self) -> Result, SetupError> { + pub fn build(self) -> Result, SetupError> { let mut gates = self.gates; let lookup_tables = self.lookup_tables; let runtime_tables = self.runtime_tables; @@ -699,8 +731,9 @@ impl Builder { let lookup_features = LookupFeatures::from_gates(&gates, runtime_tables.is_some()); - let num_lookups = { - let mut num_lookups: usize = lookup_tables + let lookup_domain_size = { + // First we sum over the lookup table size + let mut lookup_domain_size: usize = lookup_tables .iter() .map( |LookupTable { data, id: _ }| { @@ -712,30 +745,86 @@ impl Builder { }, ) .sum(); - for runtime_table in runtime_tables.iter() { - num_lookups += runtime_table.len(); + // After that on the runtime tables + if let Some(runtime_tables) = runtime_tables.as_ref() { + for runtime_table in runtime_tables.iter() { + lookup_domain_size += runtime_table.len(); + } } + // And we add the built-in tables, depending on the features. let LookupFeatures { patterns, .. } = &lookup_features; for pattern in patterns.into_iter() { if let Some(gate_table) = pattern.table() { for table in gate_table { - num_lookups += table.table_size(); + lookup_domain_size += table.table_size(); } } } - num_lookups + lookup_domain_size }; - //~ 2. Create a domain for the circuit. That is, + //~ 1. Compute the number of zero-knowledge rows (`zk_rows`) that will be required to + //~ achieve zero-knowledge. The following constraints apply to `zk_rows`: + //~ * The number of chunks `c` results in an evaluation at `zeta` and `zeta * omega` in + //~ each column for `2*c` evaluations per column, so `zk_rows >= 2*c + 1`. + //~ * The permutation argument interacts with the `c` chunks in parallel, so it is + //~ possible to cross-correlate between them to compromise zero knowledge. We know + //~ that there is some `c >= 1` such that `zk_rows = 2*c + k` from the above. 
Thus, + //~ attempting to find the evaluation at a new point, we find that: + //~ * the evaluation of every witness column in the permutation contains `k` unknowns; + //~ * the evaluations of the permutation argument aggregation has `k-1` unknowns; + //~ * the permutation argument applies on all but `zk_rows - 3` rows; + //~ * and thus we form the equation `zk_rows - 3 < 7 * k + (k - 1)` to ensure that we + //~ can construct fewer equations than we have unknowns. + //~ + //~ This simplifies to `k > (2 * c - 2) / 7`, giving `zk_rows > (16 * c - 2) / 7`. + //~ We can derive `c` from the `max_poly_size` supported by the URS, and thus we find + //~ `zk_rows` and `domain_size` satisfying the fixpoint + //~ + //~ ```text + //~ zk_rows = (16 * (domain_size / max_poly_size) + 5) / 7 + //~ domain_size = circuit_size + zk_rows + //~ ``` + //~ + let (zk_rows, domain_size_lower_bound) = { + let circuit_lower_bound = std::cmp::max(gates.len(), lookup_domain_size + 1); + let get_domain_size_lower_bound = |zk_rows: u64| circuit_lower_bound + zk_rows as usize; + + let mut zk_rows = 3; + let mut domain_size_lower_bound = get_domain_size_lower_bound(zk_rows); + if let Some(max_poly_size) = self.max_poly_size { + // Iterate to find a fixed-point where zk_rows is sufficient for the number of + // chunks that we use, and also does not cause us to overflow the domain size. + // NB: We use iteration here rather than hard-coding an assumption about + // `compute_size_of_domain`s internals. In practice, this will never be executed + // more than once. 
+ while { + let domain_size = D::::compute_size_of_domain(domain_size_lower_bound) + .ok_or(SetupError::DomainCreation( + DomainCreationError::DomainSizeFailed(domain_size_lower_bound), + ))?; + let num_chunks = if domain_size < max_poly_size { + 1 + } else { + domain_size / max_poly_size + }; + zk_rows = (zk_rows_strict_lower_bound(num_chunks) + 1) as u64; + domain_size_lower_bound = get_domain_size_lower_bound(zk_rows); + domain_size < domain_size_lower_bound + } {} + } + (zk_rows, domain_size_lower_bound) + }; + + //~ 1. Create a domain for the circuit. That is, //~ compute the smallest subgroup of the field that - //~ has order greater or equal to `n + ZK_ROWS` elements. - let domain_size_lower_bound = - std::cmp::max(gates.len(), num_lookups + 1) + ZK_ROWS as usize; - let domain = EvaluationDomains::::create(domain_size_lower_bound)?; + //~ has order greater or equal to `n + zk_rows` elements. + let domain = EvaluationDomains::::create(domain_size_lower_bound) + .map_err(SetupError::DomainCreation)?; - assert!(domain.d1.size > ZK_ROWS); + assert!(domain.d1.size > zk_rows); - //~ 3. Pad the circuit: add zero gates to reach the domain size. + //~ 1. Pad the circuit: add zero gates to reach the domain size. let d1_size = domain.d1.size(); let mut padding = (gates.len()..d1_size) .map(|i| { @@ -771,15 +860,20 @@ impl Builder { } } - //~ 4. sample the `PERMUTS` shifts. + //~ 1. sample the `PERMUTS` shifts. 
let shifts = Shifts::new(&domain.d1); // // Lookup // ------ - let lookup_constraint_system = - LookupConstraintSystem::create::(&gates, lookup_tables, runtime_tables, &domain) - .map_err(|e| SetupError::ConstraintSystem(e.to_string()))?; + let lookup_constraint_system = LookupConstraintSystem::create::( + &gates, + lookup_tables, + runtime_tables, + &domain, + zk_rows as usize, + ) + .map_err(|e| SetupError::ConstraintSystem(e.to_string()))?; let sid = shifts.map[0].clone(); @@ -796,6 +890,7 @@ impl Builder { gates, shift: shifts.shifts, endo, + zk_rows, //fr_sponge_params: self.sponge_params, lookup_constraint_system, feature_flags, @@ -821,12 +916,12 @@ pub mod tests { use mina_curves::pasta::Fp; impl ConstraintSystem { - pub fn for_testing(gates: Vec>) -> Self { + pub fn for_testing(gates: Vec>) -> Self { let public = 0; // not sure if theres a smarter way instead of the double unwrap, but should be fine in the test ConstraintSystem::::create(gates) .public(public) - .build::() + .build::() .unwrap() } } @@ -834,7 +929,38 @@ pub mod tests { impl ConstraintSystem { pub fn fp_for_testing(gates: Vec>) -> Self { //let fp_sponge_params = mina_poseidon::pasta::fp_kimchi::params(); - Self::for_testing::(gates) + Self::for_testing::(gates) + } + } + + #[test] + pub fn test_domains_computation_with_runtime_tables() { + let dummy_gate = CircuitGate { + typ: GateType::Generic, + wires: [Wire::new(0, 0); PERMUTS], + coeffs: vec![Fp::zero()], + }; + // inputs + expected output + let data = [((10, 10), 128), ((0, 0), 8), ((5, 100), 512)]; + for ((number_of_rt_cfgs, size), expected_domain_size) in data.into_iter() { + let builder = ConstraintSystem::create(vec![dummy_gate.clone(), dummy_gate.clone()]); + let table_ids: Vec = (0..number_of_rt_cfgs).collect(); + let rt_cfgs: Vec> = table_ids + .into_iter() + .map(|table_id| { + let indexes: Vec = (0..size).collect(); + let first_column: Vec = indexes.into_iter().map(Fp::from).collect(); + RuntimeTableCfg { + id: table_id, + 
first_column, + } + }) + .collect(); + let res = builder + .runtime(Some(rt_cfgs)) + .build::() + .unwrap(); + assert_eq!(res.domain.d1.size, expected_domain_size) } } } diff --git a/kimchi/src/circuits/domain_constant_evaluation.rs b/kimchi/src/circuits/domain_constant_evaluation.rs index 15cbb5ff92..6659f42d31 100644 --- a/kimchi/src/circuits/domain_constant_evaluation.rs +++ b/kimchi/src/circuits/domain_constant_evaluation.rs @@ -1,8 +1,6 @@ //! This contains the [DomainConstantEvaluations] which is used to provide precomputations to a [ConstraintSystem](super::constraints::ConstraintSystem). use crate::circuits::domains::EvaluationDomains; -use crate::circuits::polynomials::permutation::zk_polynomial; -use crate::circuits::polynomials::permutation::ZK_ROWS; use ark_ff::FftField; use ark_poly::EvaluationDomain; use ark_poly::UVPolynomial; @@ -10,7 +8,7 @@ use ark_poly::{univariate::DensePolynomial as DP, Evaluations as E, Radix2Evalua use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use super::polynomials::permutation::vanishes_on_last_4_rows; +use super::polynomials::permutation::{permutation_vanishing_polynomial, vanishes_on_last_n_rows}; #[serde_as] #[derive(Clone, Serialize, Deserialize, Debug)] @@ -26,18 +24,18 @@ pub struct DomainConstantEvaluations { /// 0-th Lagrange evaluated over domain.d8 #[serde_as(as = "o1_utils::serialization::SerdeAs")] pub constant_1_d8: E>, - /// the polynomial that vanishes on the last four rows + /// the polynomial that vanishes on the zero-knowledge rows and the row before #[serde_as(as = "o1_utils::serialization::SerdeAs")] - pub vanishes_on_last_4_rows: E>, + pub vanishes_on_zero_knowledge_and_previous_rows: E>, /// zero-knowledge polynomial over domain.d8 #[serde_as(as = "o1_utils::serialization::SerdeAs")] - pub zkpl: E>, + pub permutation_vanishing_polynomial_l: E>, #[serde_as(as = "o1_utils::serialization::SerdeAs")] - pub zkpm: DP, + pub permutation_vanishing_polynomial_m: DP, } impl 
DomainConstantEvaluations { - pub fn create(domain: EvaluationDomains) -> Option { + pub fn create(domain: EvaluationDomains, zk_rows: u64) -> Option { let poly_x_d1 = DP::from_coefficients_slice(&[F::zero(), F::one()]) .evaluate_over_domain_by_ref(domain.d8); let constant_1_d4 = @@ -45,22 +43,24 @@ impl DomainConstantEvaluations { let constant_1_d8 = E::>::from_vec_and_domain(vec![F::one(); domain.d8.size()], domain.d8); - let vanishes_on_last_4_rows = - vanishes_on_last_4_rows(domain.d1).evaluate_over_domain(domain.d8); + let vanishes_on_zero_knowledge_and_previous_rows = + vanishes_on_last_n_rows(domain.d1, zk_rows + 1).evaluate_over_domain(domain.d8); - assert!(domain.d1.size > ZK_ROWS); + assert!(domain.d1.size > zk_rows); // x^3 - x^2(w1+w2+w3) + x(w1w2+w1w3+w2w3) - w1w2w3 - let zkpm = zk_polynomial(domain.d1); - let zkpl = zkpm.evaluate_over_domain_by_ref(domain.d8); + let permutation_vanishing_polynomial_m = + permutation_vanishing_polynomial(domain.d1, zk_rows); + let permutation_vanishing_polynomial_l = + permutation_vanishing_polynomial_m.evaluate_over_domain_by_ref(domain.d8); Some(DomainConstantEvaluations { poly_x_d1, constant_1_d4, constant_1_d8, - vanishes_on_last_4_rows, - zkpl, - zkpm, + vanishes_on_zero_knowledge_and_previous_rows, + permutation_vanishing_polynomial_l, + permutation_vanishing_polynomial_m, }) } } diff --git a/kimchi/src/circuits/domains.rs b/kimchi/src/circuits/domains.rs index 7f32dd6e12..89251bea5f 100644 --- a/kimchi/src/circuits/domains.rs +++ b/kimchi/src/circuits/domains.rs @@ -3,7 +3,7 @@ use ark_poly::{EvaluationDomain, Radix2EvaluationDomain as Domain}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use crate::error::SetupError; +use crate::error::DomainCreationError; #[serde_as] #[derive(Debug, Clone, Copy, Serialize, Deserialize)] @@ -22,26 +22,29 @@ impl EvaluationDomains { /// Creates 4 evaluation domains `d1` (of size `n`), `d2` (of size `2n`), `d4` (of size `4n`), /// and `d8` (of size `8n`). 
If generator of `d8` is `g`, the generator /// of `d4` is `g^2`, the generator of `d2` is `g^4`, and the generator of `d1` is `g^8`. - pub fn create(n: usize) -> Result { - let n = Domain::::compute_size_of_domain(n).ok_or(SetupError::DomainCreation( - "could not compute size of domain", - ))?; + pub fn create(n: usize) -> Result { + let n = Domain::::compute_size_of_domain(n) + .ok_or(DomainCreationError::DomainSizeFailed(n))?; - let d1 = Domain::::new(n).ok_or(SetupError::DomainCreation( - "construction of domain d1 did not work as intended", + let d1 = Domain::::new(n).ok_or(DomainCreationError::DomainConstructionFailed( + "d1".to_string(), + n, ))?; // we also create domains of larger sizes // to efficiently operate on polynomials in evaluation form. // (in evaluation form, the domain needs to grow as the degree of a polynomial grows) - let d2 = Domain::::new(2 * n).ok_or(SetupError::DomainCreation( - "construction of domain d2 did not work as intended", + let d2 = Domain::::new(2 * n).ok_or(DomainCreationError::DomainConstructionFailed( + "d2".to_string(), + 2 * n, ))?; - let d4 = Domain::::new(4 * n).ok_or(SetupError::DomainCreation( - "construction of domain d4 did not work as intended", + let d4 = Domain::::new(4 * n).ok_or(DomainCreationError::DomainConstructionFailed( + "d4".to_string(), + 4 * n, ))?; - let d8 = Domain::::new(8 * n).ok_or(SetupError::DomainCreation( - "construction of domain d8 did not work as intended", + let d8 = Domain::::new(8 * n).ok_or(DomainCreationError::DomainConstructionFailed( + "d8".to_string(), + 8 * n, ))?; // ensure the relationship between the three domains in case the library's behavior changes diff --git a/kimchi/src/circuits/expr.rs b/kimchi/src/circuits/expr.rs index 649f405415..90150e237f 100644 --- a/kimchi/src/circuits/expr.rs +++ b/kimchi/src/circuits/expr.rs @@ -1,5 +1,6 @@ use crate::{ circuits::{ + berkeley_columns, constraints::FeatureFlags, domains::EvaluationDomains, gate::{CurrOrNext, GateType}, @@ -7,9 +8,9 
@@ use crate::{ index::LookupSelectors, lookups::{LookupPattern, LookupPatterns}, }, - polynomials::permutation::eval_vanishes_on_last_4_rows, + polynomials::permutation::eval_vanishes_on_last_n_rows, }, - proof::{PointEvaluations, ProofEvaluations}, + proof::PointEvaluations, }; use ark_ff::{FftField, Field, One, PrimeField, Zero}; use ark_poly::{ @@ -20,18 +21,22 @@ use o1_utils::{foreign_field::ForeignFieldHelpers, FieldHelpers}; use rayon::prelude::*; use serde::{Deserialize, Serialize}; use std::ops::{Add, AddAssign, Mul, Neg, Sub}; +use std::{ + cmp::Ordering, + fmt::{self, Debug}, + iter::FromIterator, +}; use std::{ collections::{HashMap, HashSet}, ops::MulAssign, }; -use std::{fmt, iter::FromIterator}; use thiserror::Error; use CurrOrNext::{Curr, Next}; use self::constraints::ExprOps; #[derive(Debug, Error)] -pub enum ExprError { +pub enum ExprError { #[error("Empty stack")] EmptyStack, @@ -45,7 +50,7 @@ pub enum ExprError { MissingIndexEvaluation(Column), #[error("Linearization failed (too many unevaluated columns: {0:?}")] - FailedLinearization(Vec), + FailedLinearization(Vec>), #[error("runtime table not available")] MissingRuntime, @@ -66,6 +71,8 @@ pub struct Constants { pub endo_coefficient: F, /// The MDS matrix pub mds: &'static Vec>, + /// The number of zero-knowledge rows + pub zk_rows: u64, } /// The polynomials specific to the lookup argument. @@ -90,13 +97,13 @@ pub struct LookupEnvironment<'a, F: FftField> { /// required to evaluate an expression as a polynomial. /// /// All are evaluations. -pub struct Environment<'a, const W: usize, F: FftField> { +pub struct Environment<'a, F: FftField, const COLUMNS: usize = KIMCHI_COLS> { /// The witness column polynomials - pub witness: &'a [Evaluations>; W], + pub witness: &'a [Evaluations>; COLUMNS], /// The coefficient column polynomials - pub coefficient: &'a [Evaluations>; W], - /// The polynomial which vanishes on the last 4 elements of the domain. 
- pub vanishes_on_last_4_rows: &'a Evaluations>, + pub coefficient: &'a [Evaluations>; COLUMNS], + /// The polynomial that vanishes on the zero-knowledge rows and the row before. + pub vanishes_on_zero_knowledge_and_previous_rows: &'a Evaluations>, /// The permutation aggregation polynomial. pub z: &'a Evaluations>, /// The index selector polynomials. @@ -112,9 +119,22 @@ pub struct Environment<'a, const W: usize, F: FftField> { pub lookup: Option>, } -impl<'a, const W: usize, F: FftField> Environment<'a, W, F> { - fn get_column(&self, col: &Column) -> Option<&'a Evaluations>> { - use Column::*; +pub trait ColumnEnvironment<'a, F: FftField> { + type Column; + fn get_column(&self, col: &Self::Column) -> Option<&'a Evaluations>>; + fn get_domain(&self, d: Domain) -> D; + fn get_constants(&self) -> &Constants; + fn vanishes_on_zero_knowledge_and_previous_rows(&self) -> &'a Evaluations>; + fn l0_1(&self) -> F; +} + +impl<'a, F: FftField, const COLUMNS: usize> ColumnEnvironment<'a, F> + for Environment<'a, F, COLUMNS> +{ + type Column = berkeley_columns::Column; + + fn get_column(&self, col: &Self::Column) -> Option<&'a Evaluations>> { + use berkeley_columns::Column::*; let lookup = self.lookup.as_ref(); match col { Witness(i) => Some(&self.witness[*i]), @@ -133,6 +153,27 @@ impl<'a, const W: usize, F: FftField> Environment<'a, W, F> { Permutation(_) => None, } } + + fn get_domain(&self, d: Domain) -> D { + match d { + Domain::D1 => self.domain.d1, + Domain::D2 => self.domain.d2, + Domain::D4 => self.domain.d4, + Domain::D8 => self.domain.d8, + } + } + + fn get_constants(&self) -> &Constants { + &self.constants + } + + fn vanishes_on_zero_knowledge_and_previous_rows(&self) -> &'a Evaluations> { + self.vanishes_on_zero_knowledge_and_previous_rows + } + + fn l0_1(&self) -> F { + self.l0_1 + } } // In this file, we define... 
@@ -163,100 +204,20 @@ fn unnormalized_lagrange_basis(domain: &D, i: i32, pt: &F) -> F domain.evaluate_vanishing_polynomial(*pt) / (*pt - omega_i) } -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)] -/// A type representing one of the polynomials involved in the PLONK IOP. -pub enum Column { - Witness(usize), - Z, - LookupSorted(usize), - LookupAggreg, - LookupTable, - LookupKindIndex(LookupPattern), - LookupRuntimeSelector, - LookupRuntimeTable, - Index(GateType), - Coefficient(usize), - Permutation(usize), -} - -impl Column { - fn domain(&self) -> Domain { - match self { - Column::Index(GateType::Generic) => Domain::D4, - Column::Index(GateType::CompleteAdd) => Domain::D4, - _ => Domain::D8, - } - } - - fn latex(&self) -> String { - match self { - Column::Witness(i) => format!("w_{{{i}}}"), - Column::Z => "Z".to_string(), - Column::LookupSorted(i) => format!("s_{{{i}}}"), - Column::LookupAggreg => "a".to_string(), - Column::LookupTable => "t".to_string(), - Column::LookupKindIndex(i) => format!("k_{{{i:?}}}"), - Column::LookupRuntimeSelector => "rts".to_string(), - Column::LookupRuntimeTable => "rt".to_string(), - Column::Index(gate) => { - format!("{gate:?}") - } - Column::Coefficient(i) => format!("c_{{{i}}}"), - Column::Permutation(i) => format!("sigma_{{{i}}}"), - } - } - - fn text(&self) -> String { - match self { - Column::Witness(i) => format!("w[{i}]"), - Column::Z => "Z".to_string(), - Column::LookupSorted(i) => format!("s[{i}]"), - Column::LookupAggreg => "a".to_string(), - Column::LookupTable => "t".to_string(), - Column::LookupKindIndex(i) => format!("k[{i:?}]"), - Column::LookupRuntimeSelector => "rts".to_string(), - Column::LookupRuntimeTable => "rt".to_string(), - Column::Index(gate) => { - format!("{gate:?}") - } - Column::Coefficient(i) => format!("c[{i}]"), - Column::Permutation(i) => format!("sigma_[{i}]"), - } - } +pub trait GenericColumn { + fn domain(&self) -> Domain; } #[derive(Copy, Clone, Debug, 
PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)] /// A type representing a variable which can appear in a constraint. It specifies a column /// and a relative position (Curr or Next) -pub struct Variable { +pub struct Variable { /// The column of this variable pub col: Column, /// The row (Curr of Next) of this variable pub row: CurrOrNext, } -impl Variable { - fn ocaml(&self) -> String { - format!("var({:?}, {:?})", self.col, self.row) - } - - fn latex(&self) -> String { - let col = self.col.latex(); - match self.row { - Curr => col, - Next => format!("\\tilde{{{col}}}"), - } - } - - fn text(&self) -> String { - let col = self.col.text(); - match self.row { - Curr => format!("Curr({col})"), - Next => format!("Next({col})"), - } - } -} - #[derive(Clone, Debug, PartialEq)] /// An arithmetic expression over /// @@ -283,7 +244,7 @@ pub enum ConstantExpr { } impl ConstantExpr { - fn to_polish_(&self, res: &mut Vec>) { + fn to_polish_(&self, res: &mut Vec>) { match self { ConstantExpr::Alpha => res.push(PolishToken::Alpha), ConstantExpr::Beta => res.push(PolishToken::Beta), @@ -418,7 +379,7 @@ pub enum Op2 { } impl Op2 { - fn to_polish(&self) -> PolishToken { + fn to_polish(&self) -> PolishToken { use Op2::*; match self { Add => PolishToken::Add, @@ -457,40 +418,49 @@ impl FeatureFlag { } } +#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct RowOffset { + pub zk_rows: bool, + pub offset: i32, +} + /// An multi-variate polynomial over the base ring `C` with /// variables /// /// - `Cell(v)` for `v : Variable` -/// - VanishesOnLast4Rows +/// - VanishesOnZeroKnowledgeAndPreviousRows /// - UnnormalizedLagrangeBasis(i) for `i : i32` /// /// This represents a PLONK "custom constraint", which enforces that /// the corresponding combination of the polynomials corresponding to /// the above variables should vanish on the PLONK domain. 
#[derive(Clone, Debug, PartialEq)] -pub enum Expr { +pub enum Expr { Constant(C), - Cell(Variable), - Double(Box>), - Square(Box>), - BinOp(Op2, Box>, Box>), - VanishesOnLast4Rows, + Cell(Variable), + Double(Box>), + Square(Box>), + BinOp(Op2, Box>, Box>), + VanishesOnZeroKnowledgeAndPreviousRows, /// UnnormalizedLagrangeBasis(i) is /// (x^n - 1) / (x - omega^i) - UnnormalizedLagrangeBasis(i32), - Pow(Box>, u64), - Cache(CacheId, Box>), + UnnormalizedLagrangeBasis(RowOffset), + Pow(Box>, u64), + Cache(CacheId, Box>), /// If the feature flag is enabled, return the first expression; otherwise, return the second. - IfFeature(FeatureFlag, Box>, Box>), + IfFeature(FeatureFlag, Box>, Box>), } -impl + PartialEq + Clone> Expr { - fn apply_feature_flags_inner(&self, features: &FeatureFlags) -> (Expr, bool) { +impl + PartialEq + Clone, Column: Clone + PartialEq> + Expr +{ + fn apply_feature_flags_inner(&self, features: &FeatureFlags) -> (Expr, bool) { use Expr::*; match self { - Constant(_) | Cell(_) | VanishesOnLast4Rows | UnnormalizedLagrangeBasis(_) => { - (self.clone(), false) - } + Constant(_) + | Cell(_) + | VanishesOnZeroKnowledgeAndPreviousRows + | UnnormalizedLagrangeBasis(_) => (self.clone(), false), Double(c) => { let (c_reduced, reduce_further) = c.apply_feature_flags_inner(features); if reduce_further && c_reduced.is_zero() { @@ -619,7 +589,7 @@ impl + PartialEq + Clone> Expr { } } } - pub fn apply_feature_flags(&self, features: &FeatureFlags) -> Expr { + pub fn apply_feature_flags(&self, features: &FeatureFlags) -> Expr { let (res, _) = self.apply_feature_flags_inner(features); res } @@ -629,7 +599,7 @@ impl + PartialEq + Clone> Expr { /// [reverse Polish notation](https://en.wikipedia.org/wiki/Reverse_Polish_notation) /// expressions, which are vectors of the below tokens. 
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub enum PolishToken { +pub enum PolishToken { Alpha, Beta, Gamma, @@ -640,14 +610,14 @@ pub enum PolishToken { col: usize, }, Literal(F), - Cell(Variable), + Cell(Variable), Dup, Pow(u64), Add, Mul, Sub, - VanishesOnLast4Rows, - UnnormalizedLagrangeBasis(i32), + VanishesOnZeroKnowledgeAndPreviousRows, + UnnormalizedLagrangeBasis(RowOffset), Store, Load(usize), /// Skip the given number of tokens if the feature is enabled. @@ -656,33 +626,17 @@ pub enum PolishToken { SkipIfNot(FeatureFlag, usize), } -impl Variable { - fn evaluate( +pub trait ColumnEvaluations { + type Column; + fn evaluate(&self, col: Self::Column) -> Result, ExprError>; +} + +impl Variable { + fn evaluate>( &self, - evals: &ProofEvaluations>, - ) -> Result { - let point_evaluations = { - use Column::*; - let l = evals - .lookup - .as_ref() - .ok_or(ExprError::LookupShouldNotBeUsed); - match self.col { - Witness(i) => Ok(evals.w[i]), - Z => Ok(evals.z), - LookupSorted(i) => l.map(|l| l.sorted[i]), - LookupAggreg => l.map(|l| l.aggreg), - LookupTable => l.map(|l| l.table), - LookupRuntimeTable => l.and_then(|l| l.runtime.ok_or(ExprError::MissingRuntime)), - Index(GateType::Poseidon) => Ok(evals.poseidon_selector), - Index(GateType::Generic) => Ok(evals.generic_selector), - Permutation(i) => Ok(evals.s[i]), - Coefficient(i) => Ok(evals.coefficients[i]), - LookupKindIndex(_) | LookupRuntimeSelector | Index(_) => { - Err(ExprError::MissingIndexEvaluation(self.col)) - } - } - }?; + evals: &Evaluations, + ) -> Result> { + let point_evaluations = evals.evaluate(self.col)?; match self.row { CurrOrNext::Curr => Ok(point_evaluations.zeta), CurrOrNext::Next => Ok(point_evaluations.zeta_omega), @@ -690,15 +644,15 @@ impl Variable { } } -impl PolishToken { +impl PolishToken { /// Evaluate an RPN expression to a field element. 
- pub fn evaluate( - toks: &[PolishToken], + pub fn evaluate>( + toks: &[PolishToken], d: D, pt: F, - evals: &ProofEvaluations>, + evals: &Evaluations, c: &Constants, - ) -> Result { + ) -> Result> { let mut stack = vec![]; let mut cache: Vec = vec![]; @@ -719,9 +673,16 @@ impl PolishToken { } EndoCoefficient => stack.push(c.endo_coefficient), Mds { row, col } => stack.push(c.mds[*row][*col]), - VanishesOnLast4Rows => stack.push(eval_vanishes_on_last_4_rows(d, pt)), + VanishesOnZeroKnowledgeAndPreviousRows => { + stack.push(eval_vanishes_on_last_n_rows(d, c.zk_rows + 1, pt)) + } UnnormalizedLagrangeBasis(i) => { - stack.push(unnormalized_lagrange_basis(&d, *i, &pt)) + let offset = if i.zk_rows { + -(c.zk_rows as i32) + i.offset + } else { + i.offset + }; + stack.push(unnormalized_lagrange_basis(&d, offset, &pt)) } Literal(x) => stack.push(*x), Dup => stack.push(stack[stack.len() - 1]), @@ -773,9 +734,9 @@ impl PolishToken { } } -impl Expr { +impl Expr { /// Convenience function for constructing cell variables. - pub fn cell(col: Column, row: CurrOrNext) -> Expr { + pub fn cell(col: Column, row: CurrOrNext) -> Expr { Expr::Cell(Variable { col, row }) } @@ -788,31 +749,33 @@ impl Expr { } /// Convenience function for constructing constant expressions. 
- pub fn constant(c: C) -> Expr { + pub fn constant(c: C) -> Expr { Expr::Constant(c) } - fn degree(&self, d1_size: u64) -> u64 { + fn degree(&self, d1_size: u64, zk_rows: u64) -> u64 { use Expr::*; match self { - Double(x) => x.degree(d1_size), + Double(x) => x.degree(d1_size, zk_rows), Constant(_) => 0, - VanishesOnLast4Rows => 4, + VanishesOnZeroKnowledgeAndPreviousRows => zk_rows + 1, UnnormalizedLagrangeBasis(_) => d1_size, Cell(_) => d1_size, - Square(x) => 2 * x.degree(d1_size), - BinOp(Op2::Mul, x, y) => (*x).degree(d1_size) + (*y).degree(d1_size), + Square(x) => 2 * x.degree(d1_size, zk_rows), + BinOp(Op2::Mul, x, y) => (*x).degree(d1_size, zk_rows) + (*y).degree(d1_size, zk_rows), BinOp(Op2::Add, x, y) | BinOp(Op2::Sub, x, y) => { - std::cmp::max((*x).degree(d1_size), (*y).degree(d1_size)) + std::cmp::max((*x).degree(d1_size, zk_rows), (*y).degree(d1_size, zk_rows)) + } + Pow(e, d) => d * e.degree(d1_size, zk_rows), + Cache(_, e) => e.degree(d1_size, zk_rows), + IfFeature(_, e1, e2) => { + std::cmp::max(e1.degree(d1_size, zk_rows), e2.degree(d1_size, zk_rows)) } - Pow(e, d) => d * e.degree(d1_size), - Cache(_, e) => e.degree(d1_size), - IfFeature(_, e1, e2) => std::cmp::max(e1.degree(d1_size), e2.degree(d1_size)), } } } -impl fmt::Display for Expr> +impl fmt::Display for Expr, berkeley_columns::Column> where F: PrimeField, { @@ -822,7 +785,7 @@ where } #[derive(Clone, Copy, Debug, PartialEq, FromPrimitive, ToPrimitive)] -enum Domain { +pub enum Domain { D1 = 1, D2 = 2, D4 = 4, @@ -895,11 +858,11 @@ pub fn pows(x: F, n: usize) -> Vec { /// = (omega^{q n} omega_8^{r n} - 1) / (omega_8^k - omega^i) /// = ((omega_8^n)^r - 1) / (omega_8^k - omega^i) /// = ((omega_8^n)^r - 1) / (omega^q omega_8^r - omega^i) -fn unnormalized_lagrange_evals( +fn unnormalized_lagrange_evals<'a, F: FftField, Environment: ColumnEnvironment<'a, F>>( l0_1: F, i: i32, res_domain: Domain, - env: &Environment, + env: &Environment, ) -> Evaluations> { let k = match res_domain { Domain::D1 
=> 1, @@ -907,9 +870,9 @@ fn unnormalized_lagrange_evals( Domain::D4 => 4, Domain::D8 => 8, }; - let res_domain = get_domain(res_domain, env); + let res_domain = env.get_domain(res_domain); - let d1 = env.domain.d1; + let d1 = env.get_domain(Domain::D1); let n = d1.size; // Renormalize negative values to wrap around at domain size let i = if i < 0 { @@ -1359,16 +1322,7 @@ impl<'a, F: FftField> EvalResult<'a, F> { } } -fn get_domain(d: Domain, env: &Environment) -> D { - match d { - Domain::D1 => env.domain.d1, - Domain::D2 => env.domain.d2, - Domain::D4 => env.domain.d4, - Domain::D8 => env.domain.d8, - } -} - -impl Expr> { +impl Expr, Column> { /// Convenience function for constructing expressions from literal /// field elements. pub fn literal(x: F) -> Self { @@ -1378,7 +1332,7 @@ impl Expr> { /// Combines multiple constraints `[c0, ..., cn]` into a single constraint /// `alpha^alpha0 * c0 + alpha^{alpha0 + 1} * c1 + ... + alpha^{alpha0 + n} * cn`. pub fn combine_constraints(alphas: impl Iterator, cs: Vec) -> Self { - let zero = Expr::>::zero(); + let zero = Expr::, Column>::zero(); cs.into_iter() .zip_eq(alphas) .map(|(c, i)| Expr::Constant(ConstantExpr::Alpha.pow(i as u64)) * c) @@ -1386,16 +1340,20 @@ impl Expr> { } } -impl Expr> { +impl Expr, Column> { /// Compile an expression to an RPN expression. 
- pub fn to_polish(&self) -> Vec> { + pub fn to_polish(&self) -> Vec> { let mut res = vec![]; let mut cache = HashMap::new(); self.to_polish_(&mut cache, &mut res); res } - fn to_polish_(&self, cache: &mut HashMap, res: &mut Vec>) { + fn to_polish_( + &self, + cache: &mut HashMap, + res: &mut Vec>, + ) { match self { Expr::Double(x) => { x.to_polish_(cache, res); @@ -1415,8 +1373,8 @@ impl Expr> { c.to_polish_(res); } Expr::Cell(v) => res.push(PolishToken::Cell(*v)), - Expr::VanishesOnLast4Rows => { - res.push(PolishToken::VanishesOnLast4Rows); + Expr::VanishesOnZeroKnowledgeAndPreviousRows => { + res.push(PolishToken::VanishesOnZeroKnowledgeAndPreviousRows); } Expr::UnnormalizedLagrangeBasis(i) => { res.push(PolishToken::UnnormalizedLagrangeBasis(*i)); @@ -1475,8 +1433,10 @@ impl Expr> { pub fn beta() -> Self { Expr::Constant(ConstantExpr::Beta) } +} - fn evaluate_constants_(&self, c: &Constants) -> Expr { +impl Expr, Column> { + fn evaluate_constants_(&self, c: &Constants) -> Expr { use Expr::*; // TODO: Use cache match self { @@ -1485,7 +1445,7 @@ impl Expr> { Square(x) => x.evaluate_constants_(c).square(), Constant(x) => Constant(x.value(c)), Cell(v) => Cell(*v), - VanishesOnLast4Rows => VanishesOnLast4Rows, + VanishesOnZeroKnowledgeAndPreviousRows => VanishesOnZeroKnowledgeAndPreviousRows, UnnormalizedLagrangeBasis(i) => UnnormalizedLagrangeBasis(*i), BinOp(Op2::Add, x, y) => x.evaluate_constants_(c) + y.evaluate_constants_(c), BinOp(Op2::Mul, x, y) => x.evaluate_constants_(c) * y.evaluate_constants_(c), @@ -1500,24 +1460,29 @@ impl Expr> { } /// Evaluate an expression as a field element against an environment. 
- pub fn evaluate( + pub fn evaluate< + 'a, + const COLUMNS: usize, + Evaluations: ColumnEvaluations, + Environment: ColumnEnvironment<'a, F, Column = Column>, + >( &self, d: D, pt: F, - evals: &ProofEvaluations>, - env: &Environment, - ) -> Result { - self.evaluate_(d, pt, evals, &env.constants) + evals: &Evaluations, + env: &Environment, + ) -> Result> { + self.evaluate_(d, pt, evals, env.get_constants()) } /// Evaluate an expression as a field element against the constants. - pub fn evaluate_( + pub fn evaluate_>( &self, d: D, pt: F, - evals: &ProofEvaluations>, + evals: &Evaluations, c: &Constants, - ) -> Result { + ) -> Result> { use Expr::*; match self { Double(x) => x.evaluate_(d, pt, evals, c).map(|x| x.double()), @@ -1539,8 +1504,17 @@ impl Expr> { let y = (*y).evaluate_(d, pt, evals, c)?; Ok(x - y) } - VanishesOnLast4Rows => Ok(eval_vanishes_on_last_4_rows(d, pt)), - UnnormalizedLagrangeBasis(i) => Ok(unnormalized_lagrange_basis(&d, *i, &pt)), + VanishesOnZeroKnowledgeAndPreviousRows => { + Ok(eval_vanishes_on_last_n_rows(d, c.zk_rows + 1, pt)) + } + UnnormalizedLagrangeBasis(i) => { + let offset = if i.zk_rows { + -(c.zk_rows as i32) + i.offset + } else { + i.offset + }; + Ok(unnormalized_lagrange_basis(&d, offset, &pt)) + } Cell(v) => v.evaluate(evals), Cache(_, e) => e.evaluate_(d, pt, evals, c), IfFeature(feature, e1, e2) => { @@ -1554,12 +1528,18 @@ impl Expr> { } /// Evaluate the constant expressions in this expression down into field elements. - pub fn evaluate_constants(&self, env: &Environment) -> Expr { - self.evaluate_constants_(&env.constants) + pub fn evaluate_constants<'a, Environment: ColumnEnvironment<'a, F, Column = Column>>( + &self, + env: &Environment, + ) -> Expr { + self.evaluate_constants_(env.get_constants()) } /// Compute the polynomial corresponding to this expression, in evaluation form. 
- pub fn evaluations(&self, env: &Environment<'_, W, F>) -> Evaluations> { + pub fn evaluations<'a, Environment: ColumnEnvironment<'a, F, Column = Column>>( + &self, + env: &Environment, + ) -> Evaluations> { self.evaluate_constants(env).evaluations(env) } } @@ -1569,53 +1549,66 @@ enum Either { Right(B), } -impl Expr { +impl Expr { /// Evaluate an expression into a field element. - pub fn evaluate( + pub fn evaluate>( &self, d: D, pt: F, - evals: &ProofEvaluations>, - ) -> Result { + zk_rows: u64, + evals: &Evaluations, + ) -> Result> { use Expr::*; match self { Constant(x) => Ok(*x), - Pow(x, p) => Ok(x.evaluate(d, pt, evals)?.pow([*p])), - Double(x) => x.evaluate(d, pt, evals).map(|x| x.double()), - Square(x) => x.evaluate(d, pt, evals).map(|x| x.square()), + Pow(x, p) => Ok(x.evaluate(d, pt, zk_rows, evals)?.pow([*p])), + Double(x) => x.evaluate(d, pt, zk_rows, evals).map(|x| x.double()), + Square(x) => x.evaluate(d, pt, zk_rows, evals).map(|x| x.square()), BinOp(Op2::Mul, x, y) => { - let x = (*x).evaluate(d, pt, evals)?; - let y = (*y).evaluate(d, pt, evals)?; + let x = (*x).evaluate(d, pt, zk_rows, evals)?; + let y = (*y).evaluate(d, pt, zk_rows, evals)?; Ok(x * y) } BinOp(Op2::Add, x, y) => { - let x = (*x).evaluate(d, pt, evals)?; - let y = (*y).evaluate(d, pt, evals)?; + let x = (*x).evaluate(d, pt, zk_rows, evals)?; + let y = (*y).evaluate(d, pt, zk_rows, evals)?; Ok(x + y) } BinOp(Op2::Sub, x, y) => { - let x = (*x).evaluate(d, pt, evals)?; - let y = (*y).evaluate(d, pt, evals)?; + let x = (*x).evaluate(d, pt, zk_rows, evals)?; + let y = (*y).evaluate(d, pt, zk_rows, evals)?; Ok(x - y) } - VanishesOnLast4Rows => Ok(eval_vanishes_on_last_4_rows(d, pt)), - UnnormalizedLagrangeBasis(i) => Ok(unnormalized_lagrange_basis(&d, *i, &pt)), + VanishesOnZeroKnowledgeAndPreviousRows => { + Ok(eval_vanishes_on_last_n_rows(d, zk_rows + 1, pt)) + } + UnnormalizedLagrangeBasis(i) => { + let offset = if i.zk_rows { + -(zk_rows as i32) + i.offset + } else { + i.offset + 
}; + Ok(unnormalized_lagrange_basis(&d, offset, &pt)) + } Cell(v) => v.evaluate(evals), - Cache(_, e) => e.evaluate(d, pt, evals), + Cache(_, e) => e.evaluate(d, pt, zk_rows, evals), IfFeature(feature, e1, e2) => { if feature.is_enabled() { - e1.evaluate(d, pt, evals) + e1.evaluate(d, pt, zk_rows, evals) } else { - e2.evaluate(d, pt, evals) + e2.evaluate(d, pt, zk_rows, evals) } } } } /// Compute the polynomial corresponding to this expression, in evaluation form. - pub fn evaluations(&self, env: &Environment<'_, W, F>) -> Evaluations> { - let d1_size = env.domain.d1.size; - let deg = self.degree(d1_size); + pub fn evaluations<'a, Environment: ColumnEnvironment<'a, F, Column = Column>>( + &self, + env: &Environment, + ) -> Evaluations> { + let d1_size = env.get_domain(Domain::D1).size; + let deg = self.degree(d1_size, env.get_constants().zk_rows); let d = if deg <= d1_size { Domain::D1 } else if deg <= 4 * d1_size { @@ -1638,13 +1631,13 @@ impl Expr { assert_eq!(domain, d); evals } - EvalResult::Constant(x) => EvalResult::init_((d, get_domain(d, env)), |_| x), + EvalResult::Constant(x) => EvalResult::init_((d, env.get_domain(d)), |_| x), EvalResult::SubEvals { evals, domain: d_sub, shift: s, } => { - let res_domain = get_domain(d, env); + let res_domain = env.get_domain(d); let scale = (d_sub as usize) / (d as usize); assert!(scale != 0); EvalResult::init_((d, res_domain), |i| { @@ -1654,16 +1647,16 @@ impl Expr { } } - fn evaluations_helper<'a, 'b, const W: usize>( + fn evaluations_helper<'a, 'b, Environment: ColumnEnvironment<'a, F, Column = Column>>( &self, cache: &'b mut HashMap>, d: Domain, - env: &Environment<'a, W, F>, + env: &Environment, ) -> Either, CacheId> where 'a: 'b, { - let dom = (d, get_domain(d, env)); + let dom = (d, env.get_domain(d)); let res: EvalResult<'a, F> = match self { Expr::Square(x) => match x.evaluations_helper(cache, d, env) { @@ -1725,22 +1718,29 @@ impl Expr { Expr::Pow(x, p) => { let x = x.evaluations_helper(cache, d, env); match 
x { - Either::Left(x) => x.pow(*p, (d, get_domain(d, env))), + Either::Left(x) => x.pow(*p, (d, env.get_domain(d))), Either::Right(id) => { - id.get_from(cache).unwrap().pow(*p, (d, get_domain(d, env))) + id.get_from(cache).unwrap().pow(*p, (d, env.get_domain(d))) } } } - Expr::VanishesOnLast4Rows => EvalResult::SubEvals { + Expr::VanishesOnZeroKnowledgeAndPreviousRows => EvalResult::SubEvals { domain: Domain::D8, shift: 0, - evals: env.vanishes_on_last_4_rows, + evals: env.vanishes_on_zero_knowledge_and_previous_rows(), }, Expr::Constant(x) => EvalResult::Constant(*x), - Expr::UnnormalizedLagrangeBasis(i) => EvalResult::Evals { - domain: d, - evals: unnormalized_lagrange_evals(env.l0_1, *i, d, env), - }, + Expr::UnnormalizedLagrangeBasis(i) => { + let offset = if i.zk_rows { + -(env.get_constants().zk_rows as i32) + i.offset + } else { + i.offset + }; + EvalResult::Evals { + domain: d, + evals: unnormalized_lagrange_evals(env.l0_1(), offset, d, env), + } + } Expr::Cell(Variable { col, row }) => { let evals: &'a Evaluations> = { match env.get_column(col) { @@ -1755,7 +1755,7 @@ impl Expr { } } Expr::BinOp(op, e1, e2) => { - let dom = (d, get_domain(d, env)); + let dom = (d, env.get_domain(d)); let f = |x: EvalResult, y: EvalResult| match op { Op2::Mul => x.mul(y, dom), Op2::Add => x.add(y, dom), @@ -1791,12 +1791,12 @@ impl Expr { #[derive(Clone, Debug, Serialize, Deserialize)] /// A "linearization", which is linear combination with `E` coefficients of /// columns. -pub struct Linearization { +pub struct Linearization { pub constant_term: E, pub index_terms: Vec<(Column, E)>, } -impl Default for Linearization { +impl Default for Linearization { fn default() -> Self { Linearization { constant_term: E::default(), @@ -1805,9 +1805,9 @@ impl Default for Linearization { } } -impl Linearization { +impl Linearization { /// Apply a function to all the coefficients in the linearization. 
- pub fn map B>(&self, f: F) -> Linearization { + pub fn map B>(&self, f: F) -> Linearization { Linearization { constant_term: f(&self.constant_term), index_terms: self.index_terms.iter().map(|(c, x)| (*c, f(x))).collect(), @@ -1815,31 +1815,38 @@ impl Linearization { } } -impl Linearization>> { +impl + Linearization, Column>, Column> +{ /// Evaluate the constants in a linearization with `ConstantExpr` coefficients down /// to literal field elements. - pub fn evaluate_constants( + pub fn evaluate_constants<'a, Environment: ColumnEnvironment<'a, F, Column = Column>>( &self, - env: &Environment, - ) -> Linearization> { + env: &Environment, + ) -> Linearization, Column> { self.map(|e| e.evaluate_constants(env)) } } -impl Linearization>> { +impl Linearization>, Column> { /// Given a linearization and an environment, compute the polynomial corresponding to the /// linearization, in evaluation form. - pub fn to_polynomial( + pub fn to_polynomial< + 'a, + ColEvaluations: ColumnEvaluations, + Environment: ColumnEnvironment<'a, F, Column = Column>, + >( &self, - env: &Environment, + env: &Environment, pt: F, - evals: &ProofEvaluations>, + evals: &ColEvaluations, ) -> (F, Evaluations>) { - let cs = &env.constants; - let n = env.domain.d1.size(); + let cs = env.get_constants(); + let d1 = env.get_domain(Domain::D1); + let n = d1.size(); let mut res = vec![F::zero(); n]; self.index_terms.iter().for_each(|(idx, c)| { - let c = PolishToken::evaluate(c, env.domain.d1, pt, evals, cs).unwrap(); + let c = PolishToken::evaluate(c, d1, pt, evals, cs).unwrap(); let e = env .get_column(idx) .unwrap_or_else(|| panic!("Index polynomial {idx:?} not found")); @@ -1848,28 +1855,35 @@ impl Linearization>> { .enumerate() .for_each(|(i, r)| *r += c * e.evals[scale * i]); }); - let p = Evaluations::>::from_vec_and_domain(res, env.domain.d1); + let p = Evaluations::>::from_vec_and_domain(res, d1); ( - PolishToken::evaluate(&self.constant_term, env.domain.d1, pt, evals, cs).unwrap(), + 
PolishToken::evaluate(&self.constant_term, d1, pt, evals, cs).unwrap(), p, ) } } -impl Linearization>> { +impl + Linearization, Column>, Column> +{ /// Given a linearization and an environment, compute the polynomial corresponding to the /// linearization, in evaluation form. - pub fn to_polynomial( + pub fn to_polynomial< + 'a, + ColEvaluations: ColumnEvaluations, + Environment: ColumnEnvironment<'a, F, Column = Column>, + >( &self, - env: &Environment, + env: &Environment, pt: F, - evals: &ProofEvaluations>, + evals: &ColEvaluations, ) -> (F, DensePolynomial) { - let cs = &env.constants; - let n = env.domain.d1.size(); + let cs = env.get_constants(); + let d1 = env.get_domain(Domain::D1); + let n = d1.size(); let mut res = vec![F::zero(); n]; self.index_terms.iter().for_each(|(idx, c)| { - let c = c.evaluate_(env.domain.d1, pt, evals, cs).unwrap(); + let c = c.evaluate_(d1, pt, evals, cs).unwrap(); let e = env .get_column(idx) .unwrap_or_else(|| panic!("Index polynomial {idx:?} not found")); @@ -1878,17 +1892,12 @@ impl Linearization>> { .enumerate() .for_each(|(i, r)| *r += c * e.evals[scale * i]) }); - let p = Evaluations::>::from_vec_and_domain(res, env.domain.d1).interpolate(); - ( - self.constant_term - .evaluate_(env.domain.d1, pt, evals, cs) - .unwrap(), - p, - ) + let p = Evaluations::>::from_vec_and_domain(res, d1).interpolate(); + (self.constant_term.evaluate_(d1, pt, evals, cs).unwrap(), p) } } -impl Expr { +impl Expr { /// Exponentiate an expression #[must_use] pub fn pow(self, p: u64) -> Self { @@ -1900,27 +1909,32 @@ impl Expr { } } -type Monomials = HashMap, Expr>; +type Monomials = HashMap>, Expr>; -fn mul_monomials + Clone + One + Zero + PartialEq>( - e1: &Monomials, - e2: &Monomials, -) -> Monomials { - let mut res: HashMap<_, Expr> = HashMap::new(); +fn mul_monomials< + F: Neg + Clone + One + Zero + PartialEq, + Column: Ord + Copy + std::hash::Hash, +>( + e1: &Monomials, + e2: &Monomials, +) -> Monomials { + let mut res: HashMap<_, Expr> = 
HashMap::new(); for (m1, c1) in e1.iter() { for (m2, c2) in e2.iter() { let mut m = m1.clone(); m.extend(m2); m.sort(); let c1c2 = c1.clone() * c2.clone(); - let v = res.entry(m).or_insert_with(Expr::::zero); + let v = res.entry(m).or_insert_with(Expr::::zero); *v = v.clone() + c1c2; } } res } -impl + Clone + One + Zero + PartialEq> Expr { +impl + Clone + One + Zero + PartialEq, Column: Ord + Copy + std::hash::Hash> + Expr +{ // TODO: This function (which takes linear time) // is called repeatedly in monomials, yielding quadratic behavior for // that function. It's ok for now as we only call that function once on @@ -1934,20 +1948,20 @@ impl + Clone + One + Zero + PartialEq> Expr { Cell(v) => evaluated.contains(&v.col), Double(x) => x.is_constant(evaluated), BinOp(_, x, y) => x.is_constant(evaluated) && y.is_constant(evaluated), - VanishesOnLast4Rows => true, + VanishesOnZeroKnowledgeAndPreviousRows => true, UnnormalizedLagrangeBasis(_) => true, Cache(_, x) => x.is_constant(evaluated), IfFeature(_, e1, e2) => e1.is_constant(evaluated) && e2.is_constant(evaluated), } } - fn monomials(&self, ev: &HashSet) -> HashMap, Expr> { - let sing = |v: Vec, c: Expr| { + fn monomials(&self, ev: &HashSet) -> HashMap>, Expr> { + let sing = |v: Vec>, c: Expr| { let mut h = HashMap::new(); h.insert(v, c); h }; - let constant = |e: Expr| sing(vec![], e); + let constant = |e: Expr| sing(vec![], e); use Expr::*; if self.is_constant(ev) { @@ -1957,7 +1971,7 @@ impl + Clone + One + Zero + PartialEq> Expr { match self { Pow(x, d) => { // Run the multiplication logic with square and multiply - let mut acc = sing(vec![], Expr::::one()); + let mut acc = sing(vec![], Expr::::one()); let mut acc_is_one = true; let x = x.monomials(ev); @@ -1980,7 +1994,9 @@ impl + Clone + One + Zero + PartialEq> Expr { } Cache(_, e) => e.monomials(ev), UnnormalizedLagrangeBasis(i) => constant(UnnormalizedLagrangeBasis(*i)), - VanishesOnLast4Rows => constant(VanishesOnLast4Rows), + 
VanishesOnZeroKnowledgeAndPreviousRows => { + constant(VanishesOnZeroKnowledgeAndPreviousRows) + } Constant(c) => constant(Constant(c.clone())), Cell(var) => sing(vec![*var], Constant(F::one())), BinOp(Op2::Add, e1, e2) => { @@ -2064,9 +2080,9 @@ impl + Clone + One + Zero + PartialEq> Expr { pub fn linearize( &self, evaluated: HashSet, - ) -> Result>, ExprError> { - let mut res: HashMap> = HashMap::new(); - let mut constant_term: Expr = Self::zero(); + ) -> Result, Column>, ExprError> { + let mut res: HashMap> = HashMap::new(); + let mut constant_term: Expr = Self::zero(); let monomials = self.monomials(&evaluated); for (m, c) in monomials { @@ -2198,7 +2214,7 @@ impl Mul> for ConstantExpr { } } -impl Zero for Expr { +impl Zero for Expr { fn zero() -> Self { Expr::Constant(F::zero()) } @@ -2211,7 +2227,7 @@ impl Zero for Expr { } } -impl One for Expr { +impl One for Expr { fn one() -> Self { Expr::Constant(F::one()) } @@ -2224,10 +2240,10 @@ impl One for Expr { } } -impl> Neg for Expr { - type Output = Expr; +impl, Column> Neg for Expr { + type Output = Expr; - fn neg(self) -> Expr { + fn neg(self) -> Expr { match self { Expr::Constant(x) => Expr::Constant(x.neg()), e => Expr::BinOp( @@ -2239,8 +2255,8 @@ impl> Neg for Expr { } } -impl Add> for Expr { - type Output = Expr; +impl Add> for Expr { + type Output = Expr; fn add(self, other: Self) -> Self { if self.is_zero() { return other; @@ -2252,7 +2268,7 @@ impl Add> for Expr { } } -impl AddAssign> for Expr { +impl AddAssign> for Expr { fn add_assign(&mut self, other: Self) { if self.is_zero() { *self = other; @@ -2262,8 +2278,8 @@ impl AddAssign> for Expr { } } -impl Mul> for Expr { - type Output = Expr; +impl Mul> for Expr { + type Output = Expr; fn mul(self, other: Self) -> Self { if self.is_zero() || other.is_zero() { return Self::zero(); @@ -2279,9 +2295,10 @@ impl Mul> for Expr { } } -impl MulAssign> for Expr +impl MulAssign> for Expr where F: Zero + One + PartialEq + Clone, + Column: PartialEq + Clone, { fn 
mul_assign(&mut self, other: Self) { if self.is_zero() || other.is_zero() { @@ -2294,8 +2311,8 @@ where } } -impl Sub> for Expr { - type Output = Expr; +impl Sub> for Expr { + type Output = Expr; fn sub(self, other: Self) -> Self { if other.is_zero() { return self; @@ -2304,13 +2321,13 @@ impl Sub> for Expr { } } -impl From for Expr { +impl From for Expr { fn from(x: u64) -> Self { Expr::Constant(F::from(x)) } } -impl From for Expr> { +impl From for Expr, Column> { fn from(x: u64) -> Self { Expr::Constant(ConstantExpr::Literal(F::from(x))) } @@ -2322,8 +2339,8 @@ impl From for ConstantExpr { } } -impl Mul for Expr> { - type Output = Expr>; +impl Mul for Expr, Column> { + type Output = Expr, Column>; fn mul(self, y: F) -> Self::Output { Expr::Constant(ConstantExpr::Literal(y)) * self @@ -2399,7 +2416,7 @@ where } } -impl Expr> +impl Expr, berkeley_columns::Column> where F: PrimeField, { @@ -2426,14 +2443,21 @@ where /// Recursively print the expression, /// except for the cached expression that are stored in the `cache`. 
- fn ocaml(&self, cache: &mut HashMap>>) -> String { + fn ocaml( + &self, + cache: &mut HashMap, berkeley_columns::Column>>, + ) -> String { use Expr::*; match self { Double(x) => format!("double({})", x.ocaml(cache)), Constant(x) => x.ocaml(), Cell(v) => format!("cell({})", v.ocaml()), - UnnormalizedLagrangeBasis(i) => format!("unnormalized_lagrange_basis({})", *i), - VanishesOnLast4Rows => "vanishes_on_last_4_rows".to_string(), + UnnormalizedLagrangeBasis(i) => { + format!("unnormalized_lagrange_basis({}, {})", i.zk_rows, i.offset) + } + VanishesOnZeroKnowledgeAndPreviousRows => { + "vanishes_on_zero_knowledge_and_previous_rows".to_string() + } BinOp(Op2::Add, x, y) => format!("({} + {})", x.ocaml(cache), y.ocaml(cache)), BinOp(Op2::Mul, x, y) => format!("({} * {})", x.ocaml(cache), y.ocaml(cache)), BinOp(Op2::Sub, x, y) => format!("({} - {})", x.ocaml(cache), y.ocaml(cache)), @@ -2475,14 +2499,30 @@ where res } - fn latex(&self, cache: &mut HashMap>>) -> String { + fn latex( + &self, + cache: &mut HashMap, berkeley_columns::Column>>, + ) -> String { use Expr::*; match self { Double(x) => format!("2 ({})", x.latex(cache)), Constant(x) => x.latex(), Cell(v) => v.latex(), - UnnormalizedLagrangeBasis(i) => format!("unnormalized\\_lagrange\\_basis({})", *i), - VanishesOnLast4Rows => "vanishes\\_on\\_last\\_4\\_rows".to_string(), + UnnormalizedLagrangeBasis(RowOffset { + zk_rows: true, + offset: i, + }) => { + format!("unnormalized\\_lagrange\\_basis(zk\\_rows + {})", *i) + } + UnnormalizedLagrangeBasis(RowOffset { + zk_rows: false, + offset: i, + }) => { + format!("unnormalized\\_lagrange\\_basis({})", *i) + } + VanishesOnZeroKnowledgeAndPreviousRows => { + "vanishes\\_on\\_zero\\_knowledge\\_and\\_previous\\_row".to_string() + } BinOp(Op2::Add, x, y) => format!("({} + {})", x.latex(cache), y.latex(cache)), BinOp(Op2::Mul, x, y) => format!("({} \\cdot {})", x.latex(cache), y.latex(cache)), BinOp(Op2::Sub, x, y) => format!("({} - {})", x.latex(cache), y.latex(cache)), 
@@ -2498,14 +2538,32 @@ where /// Recursively print the expression, /// except for the cached expression that are stored in the `cache`. - fn text(&self, cache: &mut HashMap>>) -> String { + fn text( + &self, + cache: &mut HashMap, berkeley_columns::Column>>, + ) -> String { use Expr::*; match self { Double(x) => format!("double({})", x.text(cache)), Constant(x) => x.text(), Cell(v) => v.text(), - UnnormalizedLagrangeBasis(i) => format!("unnormalized_lagrange_basis({})", *i), - VanishesOnLast4Rows => "vanishes_on_last_4_rows".to_string(), + UnnormalizedLagrangeBasis(RowOffset { + zk_rows: true, + offset: i, + }) => match i.cmp(&0) { + Ordering::Greater => format!("unnormalized_lagrange_basis(zk_rows + {})", *i), + Ordering::Equal => "unnormalized_lagrange_basis(zk_rows)".to_string(), + Ordering::Less => format!("unnormalized_lagrange_basis(zk_rows - {})", (-*i)), + }, + UnnormalizedLagrangeBasis(RowOffset { + zk_rows: false, + offset: i, + }) => { + format!("unnormalized_lagrange_basis({})", *i) + } + VanishesOnZeroKnowledgeAndPreviousRows => { + "vanishes_on_zero_knowledge_and_previous_rows".to_string() + } BinOp(Op2::Add, x, y) => format!("({} + {})", x.text(cache), y.text(cache)), BinOp(Op2::Mul, x, y) => format!("({} * {})", x.text(cache), y.text(cache)), BinOp(Op2::Sub, x, y) => format!("({} - {})", x.text(cache), y.text(cache)), @@ -2604,36 +2662,53 @@ pub mod constraints { fn literal(x: F) -> Self; // Witness variable - fn witness(row: CurrOrNext, col: usize, env: Option<&ArgumentData>) -> Self; + fn witness( + row: CurrOrNext, + col: usize, + env: Option<&ArgumentData>, + ) -> Self; /// Coefficient - fn coeff(col: usize, env: Option<&ArgumentData>) -> Self; + fn coeff(col: usize, env: Option<&ArgumentData>) -> Self; /// Create a constant - fn constant(expr: ConstantExpr, env: Option<&ArgumentData>) -> Self; + fn constant( + expr: ConstantExpr, + env: Option<&ArgumentData>, + ) -> Self; /// Cache item fn cache(&self, cache: &mut Cache) -> Self; } - impl 
ExprOps for Expr> + impl ExprOps for Expr, berkeley_columns::Column> where F: PrimeField, + Expr, berkeley_columns::Column>: std::fmt::Display, { fn two_pow(pow: u64) -> Self { - Expr::>::literal(>::two_pow(pow)) + Expr::, berkeley_columns::Column>::literal(>::two_pow(pow)) } fn two_to_limb() -> Self { - Expr::>::literal(>::two_to_limb()) + Expr::, berkeley_columns::Column>::literal(>::two_to_limb( + )) } fn two_to_2limb() -> Self { - Expr::>::literal(>::two_to_2limb()) + Expr::, berkeley_columns::Column>::literal(>::two_to_2limb( + )) } fn two_to_3limb() -> Self { - Expr::>::literal(>::two_to_3limb()) + Expr::, berkeley_columns::Column>::literal(>::two_to_3limb( + )) } fn double(&self) -> Self { @@ -2660,15 +2735,22 @@ pub mod constraints { Expr::Constant(ConstantExpr::Literal(x)) } - fn witness(row: CurrOrNext, col: usize, _: Option<&ArgumentData>) -> Self { + fn witness( + row: CurrOrNext, + col: usize, + _: Option<&ArgumentData>, + ) -> Self { witness(col, row) } - fn coeff(col: usize, _: Option<&ArgumentData>) -> Self { + fn coeff(col: usize, _: Option<&ArgumentData>) -> Self { coeff(col) } - fn constant(expr: ConstantExpr, _: Option<&ArgumentData>) -> Self { + fn constant( + expr: ConstantExpr, + _: Option<&ArgumentData>, + ) -> Self { Expr::Constant(expr) } @@ -2718,21 +2800,28 @@ pub mod constraints { x } - fn witness(row: CurrOrNext, col: usize, env: Option<&ArgumentData>) -> Self { + fn witness( + row: CurrOrNext, + col: usize, + env: Option<&ArgumentData>, + ) -> Self { match env { Some(data) => data.witness[(row, col)], None => panic!("Missing witness"), } } - fn coeff(col: usize, env: Option<&ArgumentData>) -> Self { + fn coeff(col: usize, env: Option<&ArgumentData>) -> Self { match env { Some(data) => data.coeffs[col], None => panic!("Missing coefficients"), } } - fn constant(expr: ConstantExpr, env: Option<&ArgumentData>) -> Self { + fn constant( + expr: ConstantExpr, + env: Option<&ArgumentData>, + ) -> Self { match env { Some(data) => 
expr.value(&data.constants), None => panic!("Missing constants"), @@ -2769,7 +2858,7 @@ pub mod constraints { // /// An alias for the intended usage of the expression type in constructing constraints. -pub type E = Expr>; +pub type E = Expr, berkeley_columns::Column>; /// Convenience function to create a constant as [Expr]. pub fn constant(x: F) -> E { @@ -2778,7 +2867,7 @@ pub fn constant(x: F) -> E { /// Helper function to quickly create an expression for a witness. pub fn witness(i: usize, row: CurrOrNext) -> E { - E::::cell(Column::Witness(i), row) + E::::cell(berkeley_columns::Column::Witness(i), row) } /// Same as [witness] but for the current row. @@ -2793,11 +2882,11 @@ pub fn witness_next(i: usize) -> E { /// Handy function to quickly create an expression for a gate. pub fn index(g: GateType) -> E { - E::::cell(Column::Index(g), CurrOrNext::Curr) + E::::cell(berkeley_columns::Column::Index(g), CurrOrNext::Curr) } pub fn coeff(i: usize) -> E { - E::::cell(Column::Coefficient(i), CurrOrNext::Curr) + E::::cell(berkeley_columns::Column::Coefficient(i), CurrOrNext::Curr) } /// Auto clone macro - Helps make constraints more readable @@ -2826,6 +2915,8 @@ macro_rules! 
auto_clone_array { pub use auto_clone; pub use auto_clone_array; +use super::wires::KIMCHI_COLS; + /// You can import this module like `use kimchi::circuits::expr::prologue::*` to obtain a number of handy aliases and helpers pub mod prologue { pub use super::{coeff, constant, index, witness, witness_curr, witness_next, FeatureFlag, E}; @@ -2836,19 +2927,18 @@ pub mod test { use super::*; use crate::{ circuits::{ - constraints::ConstraintSystem, - expr::constraints::ExprOps, - gate::CircuitGate, - polynomial::COLUMNS, - polynomials::{generic::GenericGateSpec, permutation::ZK_ROWS}, - wires::Wire, + constraints::ConstraintSystem, expr::constraints::ExprOps, gate::CircuitGate, + polynomial::KIMCHI_COLS, polynomials::generic::GenericGateSpec, wires::Wire, }, curve::KimchiCurve, prover_index::ProverIndex, }; use ark_ff::UniformRand; use mina_curves::pasta::{Fp, Pallas, Vesta}; - use poly_commitment::srs::{endos, SRS}; + use poly_commitment::{ + evaluation_proof::OpeningProof, + srs::{endos, SRS}, + }; use rand::{prelude::StdRng, SeedableRng}; use std::array; use std::sync::Arc; @@ -2898,10 +2988,10 @@ pub mod test { let srs = Arc::new(srs); let (endo_q, _endo_r) = endos::(); - ProverIndex::::create(constraint_system, endo_q, srs) + ProverIndex::>::create(constraint_system, endo_q, srs) }; - let witness_cols: [_; COLUMNS] = array::from_fn(|_| DensePolynomial::zero()); + let witness_cols: [_; KIMCHI_COLS] = array::from_fn(|_| DensePolynomial::zero()); let permutation = DensePolynomial::zero(); let domain_evals = index.cs.evaluate(&witness_cols, &permutation); @@ -2913,10 +3003,14 @@ pub mod test { joint_combiner: None, endo_coefficient: one, mds: &Vesta::sponge_params().mds, + zk_rows: 3, }, witness: &domain_evals.d8.this.w, coefficient: &index.column_evaluations.coefficients8, - vanishes_on_last_4_rows: &index.cs.precomputations().vanishes_on_last_4_rows, + vanishes_on_zero_knowledge_and_previous_rows: &index + .cs + .precomputations() + 
.vanishes_on_zero_knowledge_and_previous_rows, z: &domain_evals.d8.this.z, l0_1: l0_1(index.cs.domain.d1), domain: index.cs.domain, @@ -2930,7 +3024,8 @@ pub mod test { #[test] fn test_unnormalized_lagrange_basis() { - let domain = EvaluationDomains::::create(2usize.pow(10) + ZK_ROWS as usize) + let zk_rows = 3; + let domain = EvaluationDomains::::create(2usize.pow(10) + zk_rows) .expect("failed to create evaluation domain"); let rng = &mut StdRng::from_seed([17u8; 32]); diff --git a/kimchi/src/circuits/gate.rs b/kimchi/src/circuits/gate.rs index 9befdf18f6..cdbd44a6a5 100644 --- a/kimchi/src/circuits/gate.rs +++ b/kimchi/src/circuits/gate.rs @@ -16,6 +16,7 @@ use crate::{ use ark_ff::{bytes::ToBytes, PrimeField, SquareRootField}; use num_traits::cast::ToPrimitive; use o1_utils::hasher::CryptoDigest; +use poly_commitment::OpenProof; use serde::{Deserialize, Serialize}; use serde_with::serde_as; use std::io::{Result as IoResult, Write}; @@ -168,7 +169,7 @@ impl ToBytes for CircuitGate { let typ: u8 = ToPrimitive::to_u8(&self.typ).unwrap(); typ.write(&mut w)?; // TODO: update to use real value of width here - for i in 0..COLUMNS { + for i in 0..KIMCHI_COLS { self.wires[i].write(&mut w)?; } @@ -192,56 +193,60 @@ impl CircuitGate { /// # Errors /// /// Will give error if verify process returns error. 
- pub fn verify>( + pub fn verify< + G: KimchiCurve, + OpeningProof: OpenProof, + const COLUMNS: usize, + >( &self, row: usize, - witness: &[Vec; W], - index: &ProverIndex, + witness: &[Vec; COLUMNS], + index: &ProverIndex, public: &[F], ) -> Result<(), String> { use GateType::*; match self.typ { Zero => Ok(()), Generic => self.verify_generic(row, witness, public), - Poseidon => self.verify_poseidon::(row, witness), + Poseidon => self.verify_poseidon::(row, witness), CompleteAdd => self.verify_complete_add(row, witness), VarBaseMul => self.verify_vbmul(row, witness), - EndoMul => self.verify_endomul::(row, witness, &index.cs), - EndoMulScalar => self.verify_endomul_scalar::(row, witness, &index.cs), + EndoMul => self.verify_endomul::(row, witness, &index.cs), + EndoMulScalar => self.verify_endomul_scalar::(row, witness, &index.cs), // TODO: implement the verification for the lookup gate Lookup => Ok(()), CairoClaim | CairoInstruction | CairoFlags | CairoTransition => { - self.verify_cairo_gate::(row, witness, &index.cs) + self.verify_cairo_gate::(row, witness, &index.cs) } RangeCheck0 | RangeCheck1 => self - .verify_witness::(row, witness, &index.cs, public) + .verify_witness::(row, witness, &index.cs, public) .map_err(|e| e.to_string()), ForeignFieldAdd => self - .verify_witness::(row, witness, &index.cs, public) + .verify_witness::(row, witness, &index.cs, public) .map_err(|e| e.to_string()), ForeignFieldMul => self - .verify_witness::(row, witness, &index.cs, public) + .verify_witness::(row, witness, &index.cs, public) .map_err(|e| e.to_string()), Xor16 => self - .verify_witness::(row, witness, &index.cs, public) + .verify_witness::(row, witness, &index.cs, public) .map_err(|e| e.to_string()), Rot64 => self - .verify_witness::(row, witness, &index.cs, public) + .verify_witness::(row, witness, &index.cs, public) .map_err(|e| e.to_string()), KeccakRound => self - .verify_witness::(row, witness, &index.cs, public) + .verify_witness::(row, witness, &index.cs, 
public) .map_err(|e| e.to_string()), KeccakSponge => self - .verify_witness::(row, witness, &index.cs, public) + .verify_witness::(row, witness, &index.cs, public) .map_err(|e| e.to_string()), } } /// Verify the witness against the constraints - pub fn verify_witness>( + pub fn verify_witness, const COLUMNS: usize>( &self, row: usize, - witness: &[Vec; W], + witness: &[Vec; COLUMNS], cs: &ConstraintSystem, _public: &[F], ) -> CircuitGateResult<()> { @@ -256,9 +261,11 @@ impl CircuitGate { joint_combiner: Some(F::one()), endo_coefficient: cs.endo, mds: &G::sponge_params().mds, + zk_rows: cs.zk_rows, }; // Create the argument environment for the constraints over field elements - let env = ArgumentEnv::::create(argument_witness, self.coeffs.clone(), constants); + let env = + ArgumentEnv::::create(argument_witness, self.coeffs.clone(), constants); // Check the wiring (i.e. copy constraints) for this gate // Note: Gates can operated on row Curr or Curr and Next. @@ -293,7 +300,9 @@ impl CircuitGate { // TODO: implement the verification for the generic gate vec![] } - GateType::Poseidon => poseidon::Poseidon::constraint_checks(&env, &mut cache), + GateType::Poseidon => { + poseidon::Poseidon::constraint_checks::(&env, &mut cache) + } GateType::CompleteAdd => complete_add::CompleteAdd::constraint_checks(&env, &mut cache), GateType::VarBaseMul => varbasemul::VarbaseMul::constraint_checks(&env, &mut cache), GateType::EndoMul => endosclmul::EndosclMul::constraint_checks(&env, &mut cache), @@ -348,30 +357,30 @@ impl CircuitGate { } // Return the part of the witness relevant to this gate at the given row offset - fn argument_witness( + fn argument_witness( &self, row: usize, - witness: &[Vec; W], - ) -> CircuitGateResult> { + witness: &[Vec; COLUMNS], + ) -> CircuitGateResult> { // Get the part of the witness relevant to this gate - let witness_curr: [F; W] = (0..witness.len()) + let witness_curr: [F; COLUMNS] = (0..witness.len()) .map(|col| witness[col][row]) .collect::>() 
.try_into() .map_err(|_| CircuitGateError::FailedToGetWitnessForRow(self.typ, row))?; - let witness_next: [F; W] = if witness[0].len() > row + 1 { + let witness_next: [F; COLUMNS] = if witness[0].len() > row + 1 { (0..witness.len()) .map(|col| witness[col][row + 1]) .collect::>() .try_into() .map_err(|_| CircuitGateError::FailedToGetWitnessForRow(self.typ, row))? } else { - [F::zero(); W] + [F::zero(); COLUMNS] }; - Ok(ArgumentWitness:: { - curr: witness_curr.to_vec(), - next: witness_next.to_vec(), + Ok(ArgumentWitness:: { + curr: witness_curr, + next: witness_next, }) } } diff --git a/kimchi/src/circuits/lookup/constraints.rs b/kimchi/src/circuits/lookup/constraints.rs index aa2de80288..317c9738c7 100644 --- a/kimchi/src/circuits/lookup/constraints.rs +++ b/kimchi/src/circuits/lookup/constraints.rs @@ -1,6 +1,7 @@ use crate::{ circuits::{ - expr::{prologue::*, Column, ConstantExpr}, + berkeley_columns::Column, + expr::{prologue::*, ConstantExpr, RowOffset}, gate::{CircuitGate, CurrOrNext}, lookup::lookups::{ JointLookup, JointLookupSpec, JointLookupValue, LocalPosition, LookupInfo, @@ -22,9 +23,6 @@ use super::runtime_tables; /// Number of constraints produced by the argument. pub const CONSTRAINTS: u32 = 7; -/// The number of random values to append to columns for zero-knowledge. -pub const ZK_ROWS: usize = 3; - /// Pad with zeroes and then add 3 random elements in the last two /// rows for zero knowledge. 
/// @@ -34,13 +32,15 @@ pub const ZK_ROWS: usize = 3; pub fn zk_patch( mut e: Vec, d: D, + zk_rows: usize, rng: &mut R, ) -> Evaluations> { let n = d.size(); let k = e.len(); - assert!(k <= n - ZK_ROWS); - e.extend((0..((n - ZK_ROWS) - k)).map(|_| F::zero())); - e.extend((0..ZK_ROWS).map(|_| F::rand(rng))); + let last_non_zk_row = n - zk_rows; + assert!(k <= last_non_zk_row); + e.extend((k..last_non_zk_row).map(|_| F::zero())); + e.extend((0..zk_rows).map(|_| F::rand(rng))); Evaluations::>::from_vec_and_domain(e, d) } @@ -83,15 +83,16 @@ pub fn zk_patch( /// /// Will panic if `value(s)` are missing from the `table`. #[allow(clippy::too_many_arguments)] -pub fn sorted( +pub fn sorted( dummy_lookup_value: F, joint_lookup_table_d8: &Evaluations>, d1: D, gates: &[CircuitGate], - witness: &[Vec; W], + witness: &[Vec; COLUMNS], joint_combiner: F, table_id_combiner: F, lookup_info: &LookupInfo, + zk_rows: usize, ) -> Result>, ProverError> { // We pad the lookups so that it is as if we lookup exactly // `max_lookups_per_row` in every row. @@ -99,7 +100,7 @@ pub fn sorted( let n = d1.size(); let mut counts: HashMap<&F, usize> = HashMap::new(); - let lookup_rows = n - ZK_ROWS - 1; + let lookup_rows = n - zk_rows - 1; let by_row = lookup_info.by_row(gates); let max_lookups_per_row = lookup_info.max_per_row; @@ -224,12 +225,12 @@ pub fn sorted( /// /// Will panic if final evaluation is not 1. 
#[allow(clippy::too_many_arguments)] -pub fn aggregation( +pub fn aggregation( dummy_lookup_value: F, joint_lookup_table_d8: &Evaluations>, d1: D, gates: &[CircuitGate], - witness: &[Vec; W], + witness: &[Vec; COLUMNS], joint_combiner: &F, table_id_combiner: &F, beta: F, @@ -237,13 +238,14 @@ pub fn aggregation( sorted: &[Evaluations>], rng: &mut R, lookup_info: &LookupInfo, + zk_rows: usize, ) -> Result>, ProverError> where R: Rng + ?Sized, F: PrimeField, { let n = d1.size(); - let lookup_rows = n - ZK_ROWS - 1; + let lookup_rows = n - zk_rows - 1; let beta1: F = F::one() + beta; let gammabeta1 = gamma * beta1; let mut lookup_aggreg = vec![F::one()]; @@ -315,11 +317,11 @@ where lookup_aggreg[i + 1] *= prev; }); - let res = zk_patch(lookup_aggreg, d1, rng); + let res = zk_patch(lookup_aggreg, d1, zk_rows, rng); // check that the final evaluation is equal to 1 if cfg!(debug_assertions) { - let final_val = res.evals[d1.size() - (ZK_ROWS + 1)]; + let final_val = res.evals[d1.size() - (zk_rows + 1)]; if final_val != F::one() { panic!("aggregation incorrect: {final_val}"); } @@ -598,14 +600,20 @@ pub fn constraints( let aggreg_equation = E::cell(Column::LookupAggreg, Next) * denominator - E::cell(Column::LookupAggreg, Curr) * numerator; - let final_lookup_row: i32 = -(ZK_ROWS as i32) - 1; + let final_lookup_row = RowOffset { + zk_rows: true, + offset: -1, + }; let mut res = vec![ - // the accumulator except for the last 4 rows + // the accumulator except for the last zk_rows+1 rows // (contains the zk-rows and the last value of the accumulator) - E::VanishesOnLast4Rows * aggreg_equation, + E::VanishesOnZeroKnowledgeAndPreviousRows * aggreg_equation, // the initial value of the accumulator - E::UnnormalizedLagrangeBasis(0) * (E::cell(Column::LookupAggreg, Curr) - E::one()), + E::UnnormalizedLagrangeBasis(RowOffset { + zk_rows: false, + offset: 0, + }) * (E::cell(Column::LookupAggreg, Curr) - E::one()), // Check that the final value of the accumulator is 1 
E::UnnormalizedLagrangeBasis(final_lookup_row) * (E::cell(Column::LookupAggreg, Curr) - E::one()), @@ -619,7 +627,10 @@ pub fn constraints( final_lookup_row } else { // Check compatibility of the first elements - 0 + RowOffset { + zk_rows: false, + offset: 0, + } }; let mut expr = E::UnnormalizedLagrangeBasis(first_or_last) * (column(Column::LookupSorted(i)) - column(Column::LookupSorted(i + 1))); @@ -667,23 +678,24 @@ pub fn constraints( /// /// Will panic if `d1` and `s` domain sizes do not match. #[allow(clippy::too_many_arguments)] -pub fn verify, TABLE: Fn() -> I>( +pub fn verify, TABLE: Fn() -> I>( dummy_lookup_value: F, lookup_table: TABLE, lookup_table_entries: usize, d1: D, gates: &[CircuitGate], - witness: &[Vec; W], + witness: &[Vec; COLUMNS], joint_combiner: &F, table_id_combiner: &F, sorted: &[Evaluations>], lookup_info: &LookupInfo, + zk_rows: usize, ) { sorted .iter() .for_each(|s| assert_eq!(d1.size, s.domain().size)); let n = d1.size(); - let lookup_rows = n - ZK_ROWS - 1; + let lookup_rows = n - zk_rows - 1; // Check that the (desnakified) sorted table is // 1. Sorted diff --git a/kimchi/src/circuits/lookup/index.rs b/kimchi/src/circuits/lookup/index.rs index 32b565b5f0..964c4aeff5 100644 --- a/kimchi/src/circuits/lookup/index.rs +++ b/kimchi/src/circuits/lookup/index.rs @@ -7,7 +7,6 @@ use crate::circuits::{ lookups::{LookupInfo, LookupPattern}, tables::LookupTable, }, - polynomials::permutation::ZK_ROWS, }; use ark_ff::{FftField, PrimeField, SquareRootField}; use ark_poly::{ @@ -227,11 +226,12 @@ impl LookupConstraintSystem { /// # Errors /// /// Will give error if inputs validation do not match. - pub fn create( + pub fn create( gates: &[CircuitGate], lookup_tables: Vec>, runtime_tables: Option>>, domain: &EvaluationDomains, + zk_rows: usize, ) -> Result, LookupError> { //~ 1. 
If no lookup is used in the circuit, do not create a lookup index match LookupInfo::create_from_gates(gates, runtime_tables.is_some()) { @@ -240,14 +240,14 @@ impl LookupConstraintSystem { let d1_size = domain.d1.size(); // The maximum number of entries that can be provided across all tables. - // Since we do not assert the lookup constraint on the final `ZK_ROWS` rows, and + // Since we do not assert the lookup constraint on the final `zk_rows` rows, and // because the row before is used to assert that the lookup argument's final // product is 1, we cannot use those rows to store any values. - let max_num_entries = d1_size - (ZK_ROWS as usize) - 1; + let max_num_entries = d1_size - zk_rows - 1; //~ 2. Get the lookup selectors and lookup tables (TODO: how?) let (lookup_selectors, gate_lookup_tables) = - lookup_info.selector_polynomials_and_tables::(domain, gates); + lookup_info.selector_polynomials_and_tables::(domain, gates); //~ 3. Concatenate runtime lookup tables with the ones used by gates let mut lookup_tables: Vec<_> = gate_lookup_tables @@ -285,8 +285,8 @@ impl LookupConstraintSystem { .take(d1_size - runtime_table_offset - runtime_len), ); - // although the last ZK_ROWS are fine - for e in evals.iter_mut().rev().take(ZK_ROWS as usize) { + // although the last zk_rows are fine + for e in evals.iter_mut().rev().take(zk_rows) { *e = F::zero(); } @@ -324,7 +324,7 @@ impl LookupConstraintSystem { //~ that a lookup table can have. let max_table_width = lookup_tables .iter() - .map(|table| table.data.len()) + .map(|table| table.width()) .max() .unwrap_or(0); @@ -376,7 +376,7 @@ impl LookupConstraintSystem { let mut has_table_id_0_with_zero_entry = false; for table in &lookup_tables { - let table_len = table.data[0].len(); + let table_len = table.len(); if table.id == 0 { has_table_id_0 = true; @@ -394,6 +394,7 @@ impl LookupConstraintSystem { //~~ * Copy the entries from the table to new rows in the corresponding columns of the concatenated table. 
for (i, col) in table.data.iter().enumerate() { + // See GH issue: https://github.com/MinaProtocol/mina/issues/14097 if col.len() != table_len { return Err(LookupError::InconsistentTableLength); } @@ -401,7 +402,7 @@ impl LookupConstraintSystem { } //~~ * Fill in any unused columns with 0 (to match the dummy value) - for lookup_table in lookup_table.iter_mut().skip(table.data.len()) { + for lookup_table in lookup_table.iter_mut().skip(table.width()) { lookup_table.extend(repeat_n(F::zero(), table_len)); } } diff --git a/kimchi/src/circuits/lookup/lookups.rs b/kimchi/src/circuits/lookup/lookups.rs index f14c68342d..3bc66d8fdc 100644 --- a/kimchi/src/circuits/lookup/lookups.rs +++ b/kimchi/src/circuits/lookup/lookups.rs @@ -42,6 +42,7 @@ fn max_lookups_per_row(kinds: LookupPatterns) -> usize { feature = "ocaml_types", derive(ocaml::IntoValue, ocaml::FromValue, ocaml_gen::Struct) )] +#[cfg_attr(feature = "wasm_types", wasm_bindgen::prelude::wasm_bindgen)] pub struct LookupPatterns { pub xor: bool, pub lookup: bool, @@ -147,6 +148,7 @@ impl LookupPatterns { feature = "ocaml_types", derive(ocaml::IntoValue, ocaml::FromValue, ocaml_gen::Struct) )] +#[cfg_attr(feature = "wasm_types", wasm_bindgen::prelude::wasm_bindgen)] pub struct LookupFeatures { /// A single lookup constraint is a vector of lookup constraints to be applied at a row. pub patterns: LookupPatterns, @@ -172,6 +174,7 @@ impl LookupFeatures { /// Describes the desired lookup configuration. #[derive(Copy, Clone, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "wasm_types", wasm_bindgen::prelude::wasm_bindgen)] pub struct LookupInfo { /// The maximum length of an element of `kinds`. This can be computed from `kinds`. pub max_per_row: usize, @@ -211,7 +214,7 @@ impl LookupInfo { /// Each entry in `kinds` has a corresponding selector polynomial that controls whether that /// lookup kind should be enforced at a given row. This computes those selector polynomials. 
- pub fn selector_polynomials_and_tables( + pub fn selector_polynomials_and_tables( &self, domain: &EvaluationDomains, gates: &[CircuitGate], @@ -750,3 +753,62 @@ fn lookup_pattern_constants_correct() { assert_eq!((pat, pat.max_joint_size()), (pat, max_joint_size as u32)); } } + +#[cfg(feature = "wasm_types")] +pub mod wasm { + use super::*; + + #[wasm_bindgen::prelude::wasm_bindgen] + impl LookupPatterns { + #[wasm_bindgen::prelude::wasm_bindgen(constructor)] + pub fn new( + xor: bool, + lookup: bool, + range_check: bool, + foreign_field_mul: bool, + keccak_round: bool, + keccak_sponge: bool, + ) -> LookupPatterns { + LookupPatterns { + xor, + lookup, + range_check, + foreign_field_mul, + keccak_round, + keccak_sponge, + } + } + } + + #[wasm_bindgen::prelude::wasm_bindgen] + impl LookupFeatures { + #[wasm_bindgen::prelude::wasm_bindgen(constructor)] + pub fn new( + patterns: LookupPatterns, + joint_lookup_used: bool, + uses_runtime_tables: bool, + ) -> LookupFeatures { + LookupFeatures { + patterns, + joint_lookup_used, + uses_runtime_tables, + } + } + } + + #[wasm_bindgen::prelude::wasm_bindgen] + impl LookupInfo { + #[wasm_bindgen::prelude::wasm_bindgen(constructor)] + pub fn new( + max_per_row: usize, + max_joint_size: u32, + features: LookupFeatures, + ) -> LookupInfo { + LookupInfo { + max_per_row, + max_joint_size, + features, + } + } + } +} diff --git a/kimchi/src/circuits/lookup/runtime_tables.rs b/kimchi/src/circuits/lookup/runtime_tables.rs index 98b018ed8a..f8123d75d8 100644 --- a/kimchi/src/circuits/lookup/runtime_tables.rs +++ b/kimchi/src/circuits/lookup/runtime_tables.rs @@ -2,10 +2,7 @@ //! The setup has to prepare for their presence using [`RuntimeTableCfg`]. //! At proving time, the prover can use [`RuntimeTable`] to specify the actual tables. 
-use crate::circuits::{ - expr::{prologue::*, Column}, - gate::CurrOrNext, -}; +use crate::circuits::{berkeley_columns::Column, expr::prologue::*, gate::CurrOrNext}; use ark_ff::Field; use serde::{Deserialize, Serialize}; diff --git a/kimchi/src/circuits/lookup/tables/bits16.rs b/kimchi/src/circuits/lookup/tables/bits16.rs index 50ae56b613..91db8ae786 100644 --- a/kimchi/src/circuits/lookup/tables/bits16.rs +++ b/kimchi/src/circuits/lookup/tables/bits16.rs @@ -6,10 +6,6 @@ use super::BITS16_TABLE_ID; //~ The lookup table for 16-bits /// Returns the lookup table for all 16-bit values -/// -/// # Panics -/// -/// Will panic if `data` is invalid. pub fn bits16_table() -> LookupTable { let mut data = vec![vec![]; 1]; diff --git a/kimchi/src/circuits/lookup/tables/mod.rs b/kimchi/src/circuits/lookup/tables/mod.rs index f06e879639..8e01a3d4fe 100644 --- a/kimchi/src/circuits/lookup/tables/mod.rs +++ b/kimchi/src/circuits/lookup/tables/mod.rs @@ -53,7 +53,7 @@ where pub fn has_zero_entry(&self) -> bool { // reminder: a table is written as a list of columns, // not as a list of row entries. - for row in 0..self.data[0].len() { + for row in 0..self.len() { for col in &self.data { if !col[row].is_zero() { continue; @@ -65,6 +65,13 @@ where false } + /// Returns the number of columns, i.e. the width of the table. + /// It is less error prone to introduce this method than using the public + /// field data. + pub fn width(&self) -> usize { + self.data.len() + } + /// Returns the length of the table. pub fn len(&self) -> usize { self.data[0].len() diff --git a/kimchi/src/circuits/lookup/tables/reset.rs b/kimchi/src/circuits/lookup/tables/reset.rs index 2be1e04bd3..9e359f3ad9 100644 --- a/kimchi/src/circuits/lookup/tables/reset.rs +++ b/kimchi/src/circuits/lookup/tables/reset.rs @@ -8,10 +8,6 @@ use super::RESET_TABLE_ID; //~ The first column contains the 16-bit values, and the second column contains their expansion to 64-bit values. 
/// Returns the sparse lookup table -/// -/// # Panics -/// -/// Will panic if `data` is invalid. pub fn reset_table() -> LookupTable { let mut data = vec![vec![]; 2]; diff --git a/kimchi/src/circuits/lookup/tables/sparse.rs b/kimchi/src/circuits/lookup/tables/sparse.rs index e1a3fcc79d..e33130197f 100644 --- a/kimchi/src/circuits/lookup/tables/sparse.rs +++ b/kimchi/src/circuits/lookup/tables/sparse.rs @@ -5,10 +5,6 @@ use ark_ff::Field; //~ This is a 1-column table containing the sparse representation of all 16-bit preimages. /// Returns the sparse lookup table -/// -/// # Panics -/// -/// Will panic if `data` is invalid. pub fn sparse_table() -> LookupTable { let mut data = vec![vec![]; 1]; diff --git a/kimchi/src/circuits/lookup/tables/xor.rs b/kimchi/src/circuits/lookup/tables/xor.rs index d846942a31..3ecc9d3b4f 100644 --- a/kimchi/src/circuits/lookup/tables/xor.rs +++ b/kimchi/src/circuits/lookup/tables/xor.rs @@ -14,10 +14,6 @@ use ark_ff::Field; //~ will translate into a scalar multiplication by 0, which is free. /// Returns the XOR lookup table -/// -/// # Panics -/// -/// Will panic if `data` is invalid. pub fn xor_table() -> LookupTable { let mut data = vec![vec![]; 3]; diff --git a/kimchi/src/circuits/mod.rs b/kimchi/src/circuits/mod.rs index 83dbc91c63..7bad1cfd9f 100644 --- a/kimchi/src/circuits/mod.rs +++ b/kimchi/src/circuits/mod.rs @@ -2,6 +2,7 @@ pub mod macros; pub mod argument; +pub mod berkeley_columns; pub mod constraints; pub mod domain_constant_evaluation; pub mod domains; diff --git a/kimchi/src/circuits/polynomial.rs b/kimchi/src/circuits/polynomial.rs index 15e78da80a..12fd352f30 100644 --- a/kimchi/src/circuits/polynomial.rs +++ b/kimchi/src/circuits/polynomial.rs @@ -1,6 +1,6 @@ //! This module implements Plonk prover polynomials primitive. 
-pub use super::wires::COLUMNS; +pub use super::wires::KIMCHI_COLS; use ark_ff::FftField; use ark_poly::{univariate::DensePolynomial, Evaluations, Radix2EvaluationDomain as D}; @@ -8,27 +8,27 @@ use ark_poly::{univariate::DensePolynomial, Evaluations, Radix2EvaluationDomain /// Evaluations of the wires and permutation #[derive(Clone)] -pub struct WitnessEvals { +pub struct WitnessEvals { /// wire evaluations - pub w: [Evaluations>; W], + pub w: [Evaluations>; COLUMNS], /// permutation evaluations pub z: Evaluations>, } #[derive(Clone)] -pub struct WitnessShifts { +pub struct WitnessShifts { /// this wire evaluations - pub this: WitnessEvals, + pub this: WitnessEvals, /// next wire evaluations - pub next: WitnessEvals, + pub next: WitnessEvals, } #[derive(Clone)] -pub struct WitnessOverDomains { +pub struct WitnessOverDomains { /// evaluations over domain d4 - pub d4: WitnessShifts, + pub d4: WitnessShifts, /// evaluations over domain d8 - pub d8: WitnessShifts, + pub d8: WitnessShifts, } // PLOOKUP diff --git a/kimchi/src/circuits/polynomials/and.rs b/kimchi/src/circuits/polynomials/and.rs index 6c52e95442..b9f5abdcfb 100644 --- a/kimchi/src/circuits/polynomials/and.rs +++ b/kimchi/src/circuits/polynomials/and.rs @@ -133,11 +133,11 @@ pub fn lookup_table() -> LookupTable { /// Create a And for inputs as field elements starting at row 0 /// Input: first input, second input, and desired byte length /// Panics if the input is too large for the chosen number of bytes -pub fn create_and_witness( +pub fn create_and_witness( input1: F, input2: F, bytes: usize, -) -> [Vec; W] { +) -> [Vec; COLUMNS] { let input1_big = input1.to_biguint(); let input2_big = input2.to_biguint(); if bytes * 8 < input1_big.bitlen() || bytes * 8 < input2_big.bitlen() { @@ -153,7 +153,7 @@ pub fn create_and_witness( let sum = input1 + input2; let and_row = num_xors(bytes * 8) + 1; - let mut and_witness: [Vec; W] = array::from_fn(|_| vec![F::zero(); and_row + 1]); + let mut and_witness: [Vec; 
COLUMNS] = array::from_fn(|_| vec![F::zero(); and_row + 1]); init_xor(&mut and_witness, 0, bytes * 8, (input1, input2, xor)); // Fill in double generic witness @@ -170,14 +170,14 @@ pub fn create_and_witness( /// Extends an AND witness to the whole witness /// Input: first input, second input, and desired byte length /// Panics if the input is too large for the chosen number of bytes -pub fn extend_and_witness( - witness: &mut [Vec; W], +pub fn extend_and_witness( + witness: &mut [Vec; COLUMNS], input1: F, input2: F, bytes: usize, ) { - let and_witness = create_and_witness::(input1, input2, bytes); - for col in 0..W { + let and_witness = create_and_witness::(input1, input2, bytes); + for col in 0..COLUMNS { witness[col].extend(and_witness[col].iter()); } } diff --git a/kimchi/src/circuits/polynomials/complete_add.rs b/kimchi/src/circuits/polynomials/complete_add.rs index 8f4f199d22..3b39b9d711 100644 --- a/kimchi/src/circuits/polynomials/complete_add.rs +++ b/kimchi/src/circuits/polynomials/complete_add.rs @@ -97,7 +97,10 @@ where const ARGUMENT_TYPE: ArgumentType = ArgumentType::Gate(GateType::CompleteAdd); const CONSTRAINTS: u32 = 7; - fn constraint_checks>(env: &ArgumentEnv, cache: &mut Cache) -> Vec { + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + cache: &mut Cache, + ) -> Vec { // This function makes 2 + 1 + 1 + 1 + 2 = 7 constraints let x1 = env.witness_curr(0); let y1 = env.witness_curr(1); @@ -227,10 +230,10 @@ impl CircuitGate { /// # Panics /// /// Will panic if `multiplicative inverse` operation between gate values fails. 
- pub fn verify_complete_add( + pub fn verify_complete_add( &self, row: usize, - witness: &[Vec; W], + witness: &[Vec; COLUMNS], ) -> Result<(), String> { let x1 = witness[0][row]; let y1 = witness[1][row]; diff --git a/kimchi/src/circuits/polynomials/endomul_scalar.rs b/kimchi/src/circuits/polynomials/endomul_scalar.rs index 24a8486e6e..5e46d74432 100644 --- a/kimchi/src/circuits/polynomials/endomul_scalar.rs +++ b/kimchi/src/circuits/polynomials/endomul_scalar.rs @@ -7,7 +7,7 @@ use crate::{ constraints::ConstraintSystem, expr::{constraints::ExprOps, Cache}, gate::{CircuitGate, GateType}, - wires::COLUMNS, + wires::KIMCHI_COLS, }, curve::KimchiCurve, }; @@ -21,10 +21,10 @@ impl CircuitGate { /// # Errors /// /// Will give error if `self.typ` is not `GateType::EndoMulScalar`, or there are errors in gate values. - pub fn verify_endomul_scalar>( + pub fn verify_endomul_scalar, const COLUMNS: usize>( &self, row: usize, - witness: &[Vec; W], + witness: &[Vec; COLUMNS], _cs: &ConstraintSystem, ) -> Result<(), String> { ensure_eq!(self.typ, GateType::EndoMulScalar, "incorrect gate type"); @@ -167,7 +167,10 @@ where const ARGUMENT_TYPE: ArgumentType = ArgumentType::Gate(GateType::EndoMulScalar); const CONSTRAINTS: u32 = 11; - fn constraint_checks>(env: &ArgumentEnv, cache: &mut Cache) -> Vec { + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + cache: &mut Cache, + ) -> Vec { let n0 = env.witness_curr(0); let n8 = env.witness_curr(1); let a0 = env.witness_curr(2); @@ -219,7 +222,7 @@ where /// /// Will panic if `num_bits` length is not multiple of `bits_per_row` length. 
pub fn gen_witness( - witness_cols: &mut [Vec; COLUMNS], + witness_cols: &mut [Vec; KIMCHI_COLS], scalar: F, endo_scalar: F, num_bits: usize, diff --git a/kimchi/src/circuits/polynomials/endosclmul.rs b/kimchi/src/circuits/polynomials/endosclmul.rs index 40ef369885..63156559c8 100644 --- a/kimchi/src/circuits/polynomials/endosclmul.rs +++ b/kimchi/src/circuits/polynomials/endosclmul.rs @@ -12,7 +12,7 @@ use crate::{ Cache, }, gate::{CircuitGate, GateType}, - wires::{GateWires, COLUMNS}, + wires::{GateWires, KIMCHI_COLS}, }, curve::KimchiCurve, proof::{PointEvaluations, ProofEvaluations}, @@ -125,16 +125,16 @@ impl CircuitGate { /// # Errors /// /// Will give error if `self.typ` is not `GateType::EndoMul`, or `constraint evaluation` fails. - pub fn verify_endomul>( + pub fn verify_endomul, const COLUMNS: usize>( &self, row: usize, - witness: &[Vec; W], + witness: &[Vec; COLUMNS], cs: &ConstraintSystem, ) -> Result<(), String> { ensure_eq!(self.typ, GateType::EndoMul, "incorrect gate type"); - let this: [F; W] = std::array::from_fn(|i| witness[i][row]); - let next: [F; W] = std::array::from_fn(|i| witness[i][row + 1]); + let this: [F; COLUMNS] = std::array::from_fn(|i| witness[i][row]); + let next: [F; COLUMNS] = std::array::from_fn(|i| witness[i][row + 1]); let pt = F::from(123456u64); @@ -145,9 +145,10 @@ impl CircuitGate { joint_combiner: None, mds: &G::sponge_params().mds, endo_coefficient: cs.endo, + zk_rows: cs.zk_rows, }; - let evals: ProofEvaluations> = + let evals: ProofEvaluations, COLUMNS> = ProofEvaluations::dummy_with_witness_evaluations(this, next); let constraints = EndosclMul::::constraints(&mut Cache::default()); @@ -185,7 +186,10 @@ where const ARGUMENT_TYPE: ArgumentType = ArgumentType::Gate(GateType::EndoMul); const CONSTRAINTS: u32 = 11; - fn constraint_checks>(env: &ArgumentEnv, cache: &mut Cache) -> Vec { + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + cache: &mut Cache, + ) -> Vec { let b1 = env.witness_curr(11); let b2 = 
env.witness_curr(12); let b3 = env.witness_curr(13); @@ -270,7 +274,7 @@ pub struct EndoMulResult { /// /// Will panic if `bits` length does not match the requirement. pub fn gen_witness( - w: &mut [Vec; COLUMNS], + w: &mut [Vec; KIMCHI_COLS], row0: usize, endo: F, base: (F, F), diff --git a/kimchi/src/circuits/polynomials/foreign_field_add/circuitgates.rs b/kimchi/src/circuits/polynomials/foreign_field_add/circuitgates.rs index cc25e60565..289d31a6bf 100644 --- a/kimchi/src/circuits/polynomials/foreign_field_add/circuitgates.rs +++ b/kimchi/src/circuits/polynomials/foreign_field_add/circuitgates.rs @@ -142,7 +142,10 @@ where const ARGUMENT_TYPE: ArgumentType = ArgumentType::Gate(GateType::ForeignFieldAdd); const CONSTRAINTS: u32 = 4; - fn constraint_checks>(env: &ArgumentEnv, _cache: &mut Cache) -> Vec { + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + _cache: &mut Cache, + ) -> Vec { let foreign_modulus: [T; LIMB_COUNT] = array::from_fn(|i| env.coeff(i)); // stored as coefficient for better correspondance with the relation being proved diff --git a/kimchi/src/circuits/polynomials/foreign_field_add/witness.rs b/kimchi/src/circuits/polynomials/foreign_field_add/witness.rs index 57af9b6ff0..55756468d4 100644 --- a/kimchi/src/circuits/polynomials/foreign_field_add/witness.rs +++ b/kimchi/src/circuits/polynomials/foreign_field_add/witness.rs @@ -4,7 +4,7 @@ use crate::circuits::expr::constraints::compact_limb; use crate::circuits::witness::Variables; use crate::{ circuits::{ - polynomial::COLUMNS, + polynomial::KIMCHI_COLS, witness::{self, ConstantCell, VariableCell, WitnessCell}, }, variable_map, @@ -130,7 +130,7 @@ pub fn create_chain( inputs: &Vec, opcodes: &[FFOps], modulus: BigUint, -) -> [Vec; COLUMNS] { +) -> [Vec; KIMCHI_COLS] { if modulus > BigUint::max_foreign_field_modulus::() { panic!( "foreign_field_modulus exceeds maximum: {} > {}", @@ -178,14 +178,14 @@ pub fn create_chain( } fn init_ffadd_row( - witness: &mut [Vec; COLUMNS], + 
witness: &mut [Vec; KIMCHI_COLS], offset: usize, left: [F; 3], right: [F; 3], overflow: F, carry: F, ) { - let layout: [Vec>>; 1] = [ + let layout: [Vec>>; 1] = [ // ForeignFieldAdd row vec![ VariableCell::create("left_lo"), @@ -215,13 +215,13 @@ fn init_ffadd_row( } fn init_bound_rows( - witness: &mut [Vec; COLUMNS], + witness: &mut [Vec; KIMCHI_COLS], offset: usize, result: &[F; 3], bound: &[F; 3], carry: &F, ) { - let layout: [Vec>>; 2] = [ + let layout: [Vec>>; 2] = [ vec![ // ForeignFieldAdd row VariableCell::create("result_lo"), @@ -270,7 +270,7 @@ fn init_bound_rows( /// Create witness for bound computation addition gate pub fn extend_witness_bound_addition( - witness: &mut [Vec; COLUMNS], + witness: &mut [Vec; KIMCHI_COLS], limbs: &[F; 3], foreign_field_modulus: &[F; 3], ) { @@ -297,7 +297,7 @@ pub fn extend_witness_bound_addition( // Extend the witness for the add gate let offset = witness[0].len(); - for col in witness.iter_mut().take(COLUMNS) { + for col in witness.iter_mut().take(KIMCHI_COLS) { col.extend(std::iter::repeat(F::zero()).take(2)) } diff --git a/kimchi/src/circuits/polynomials/foreign_field_mul/circuitgates.rs b/kimchi/src/circuits/polynomials/foreign_field_mul/circuitgates.rs index 4baa6a9eb9..b4e390ccd8 100644 --- a/kimchi/src/circuits/polynomials/foreign_field_mul/circuitgates.rs +++ b/kimchi/src/circuits/polynomials/foreign_field_mul/circuitgates.rs @@ -193,7 +193,10 @@ where const CONSTRAINTS: u32 = 11; // DEGREE is 4 - fn constraint_checks>(env: &ArgumentEnv, _cache: &mut Cache) -> Vec { + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + _cache: &mut Cache, + ) -> Vec { let mut constraints = vec![]; // diff --git a/kimchi/src/circuits/polynomials/foreign_field_mul/gadget.rs b/kimchi/src/circuits/polynomials/foreign_field_mul/gadget.rs index abf81cfcbc..9bbf590d31 100644 --- a/kimchi/src/circuits/polynomials/foreign_field_mul/gadget.rs +++ b/kimchi/src/circuits/polynomials/foreign_field_mul/gadget.rs @@ -96,7 +96,7 @@ 
pub fn circuit_gates() -> [GateType; GATE_COUNT] { } /// Get combined constraints for a given foreign field multiplication circuit gate -pub fn circuit_gate_constraints( +pub fn circuit_gate_constraints( typ: GateType, alphas: &Alphas, cache: &mut Cache, diff --git a/kimchi/src/circuits/polynomials/foreign_field_mul/witness.rs b/kimchi/src/circuits/polynomials/foreign_field_mul/witness.rs index 50fb4d2305..8284916480 100644 --- a/kimchi/src/circuits/polynomials/foreign_field_mul/witness.rs +++ b/kimchi/src/circuits/polynomials/foreign_field_mul/witness.rs @@ -3,7 +3,7 @@ use crate::{ auto_clone_array, circuits::{ - polynomial::COLUMNS, + polynomial::KIMCHI_COLS, polynomials::{foreign_field_add, range_check}, witness::{self, ConstantCell, VariableBitsCell, VariableCell, Variables, WitnessCell}, }, @@ -41,7 +41,7 @@ use super::circuitgates; // // so that most significant limb, q2, is in W[2][0]. // -fn create_layout() -> [Vec>>; 2] { +fn create_layout() -> [Vec>>; 2] { [ // ForeignFieldMul row vec![ @@ -147,7 +147,7 @@ pub fn create( left_input: &BigUint, right_input: &BigUint, foreign_field_modulus: &BigUint, -) -> ([Vec; COLUMNS], ExternalChecks) { +) -> ([Vec; KIMCHI_COLS], ExternalChecks) { let mut witness = array::from_fn(|_| vec![F::zero(); 0]); let mut external_checks = ExternalChecks::::default(); @@ -269,7 +269,7 @@ impl ExternalChecks { } /// Extend the witness with external multi range_checks - pub fn extend_witness_multi_range_checks(&mut self, witness: &mut [Vec; COLUMNS]) { + pub fn extend_witness_multi_range_checks(&mut self, witness: &mut [Vec; KIMCHI_COLS]) { for [v0, v1, v2] in self.multi_ranges.clone() { range_check::witness::extend_multi(witness, v0, v1, v2) } @@ -277,7 +277,10 @@ impl ExternalChecks { } /// Extend the witness with external compact multi range_checks - pub fn extend_witness_compact_multi_range_checks(&mut self, witness: &mut [Vec; COLUMNS]) { + pub fn extend_witness_compact_multi_range_checks( + &mut self, + witness: &mut [Vec; 
KIMCHI_COLS], + ) { for [v01, v2] in self.compact_multi_ranges.clone() { range_check::witness::extend_multi_compact(witness, v01, v2) } @@ -285,7 +288,7 @@ impl ExternalChecks { } /// Extend the witness with external compact multi range_checks - pub fn extend_witness_limb_checks(&mut self, witness: &mut [Vec; COLUMNS]) { + pub fn extend_witness_limb_checks(&mut self, witness: &mut [Vec; KIMCHI_COLS]) { for chunk in self.limb_ranges.clone().chunks(3) { // Pad with zeros if necessary let limbs = match chunk.len() { @@ -302,7 +305,7 @@ impl ExternalChecks { /// Extend the witness with external bound addition as foreign field addition pub fn extend_witness_bound_addition( &mut self, - witness: &mut [Vec; COLUMNS], + witness: &mut [Vec; KIMCHI_COLS], foreign_field_modulus: &[F; 3], ) { for bound in self.bounds.clone() { @@ -318,13 +321,13 @@ impl ExternalChecks { /// Extend the witness with external high bounds additions as double generic gates pub fn extend_witness_high_bounds_computation( &mut self, - witness: &mut [Vec; COLUMNS], + witness: &mut [Vec; KIMCHI_COLS], foreign_field_modulus: &BigUint, ) { let hi_limb = F::two_to_limb() - foreign_field_modulus.to_field_limbs::()[2] - F::one(); for chunk in self.high_bounds.clone().chunks(2) { // Extend the witness for the generic gate - for col in witness.iter_mut().take(COLUMNS) { + for col in witness.iter_mut().take(KIMCHI_COLS) { col.extend(std::iter::repeat(F::zero()).take(1)) } let last_row = witness[0].len() - 1; diff --git a/kimchi/src/circuits/polynomials/generic.rs b/kimchi/src/circuits/polynomials/generic.rs index 717b61c2a0..29bd71b825 100644 --- a/kimchi/src/circuits/polynomials/generic.rs +++ b/kimchi/src/circuits/polynomials/generic.rs @@ -42,6 +42,7 @@ use crate::circuits::{ use crate::{curve::KimchiCurve, prover_index::ProverIndex}; use ark_ff::{FftField, PrimeField, Zero}; use ark_poly::univariate::DensePolynomial; +use poly_commitment::OpenProof; use std::array; use std::marker::PhantomData; @@ -73,7 
+74,10 @@ where const ARGUMENT_TYPE: ArgumentType = ArgumentType::Gate(GateType::Generic); const CONSTRAINTS: u32 = 2; - fn constraint_checks>(env: &ArgumentEnv, _cache: &mut Cache) -> Vec { + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + _cache: &mut Cache, + ) -> Vec { // First generic gate let left_coeff1 = env.coeff(0); let right_coeff1 = env.coeff(1); @@ -256,14 +260,14 @@ pub mod testing { /// # Errors /// /// Will give error if `self.typ` is not `GateType::Generic`. - pub fn verify_generic( + pub fn verify_generic( &self, row: usize, - witness: &[Vec; W], + witness: &[Vec; COLUMNS], public: &[F], ) -> Result<(), String> { // assignments - let this: [F; W] = array::from_fn(|i| witness[i][row]); + let this: [F; COLUMNS] = array::from_fn(|i| witness[i][row]); // constants let zero = F::zero(); @@ -306,14 +310,20 @@ pub mod testing { } } - impl> ProverIndex { + impl< + F: PrimeField, + G: KimchiCurve, + OpeningProof: OpenProof, + const COLUMNS: usize, + > ProverIndex + { /// Function to verify the generic polynomials with a witness. pub fn verify_generic( &self, - witness: &[DensePolynomial; W], + witness: &[DensePolynomial; COLUMNS], public: &DensePolynomial, ) -> bool { - let coefficientsm: [_; W] = array::from_fn(|i| { + let coefficientsm: [_; COLUMNS] = array::from_fn(|i| { self.column_evaluations.coefficients8[i] .clone() .interpolate() @@ -363,7 +373,7 @@ pub mod testing { /// # Panics /// /// Will panic if `gates_row` is None. - pub fn create_circuit( + pub fn create_circuit( start_row: usize, public: usize, ) -> Vec> { @@ -422,9 +432,9 @@ pub mod testing { /// # Panics /// /// Will panic if `witness_row` is None. 
- pub fn fill_in_witness( + pub fn fill_in_witness( start_row: usize, - witness: &mut [Vec; W], + witness: &mut [Vec; COLUMNS], public: &[F], ) { // fill witness diff --git a/kimchi/src/circuits/polynomials/keccak/circuitgates.rs b/kimchi/src/circuits/polynomials/keccak/circuitgates.rs index 8903d7dc9c..7037f4f651 100644 --- a/kimchi/src/circuits/polynomials/keccak/circuitgates.rs +++ b/kimchi/src/circuits/polynomials/keccak/circuitgates.rs @@ -91,7 +91,10 @@ where const CONSTRAINTS: u32 = 389; // Constraints for one round of the Keccak permutation function - fn constraint_checks>(env: &ArgumentEnv, _cache: &mut Cache) -> Vec { + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + _cache: &mut Cache, + ) -> Vec { let mut constraints = vec![]; // DEFINE ROUND CONSTANT @@ -213,7 +216,10 @@ where const CONSTRAINTS: u32 = 568; // Constraints for one round of the Keccak permutation function - fn constraint_checks>(env: &ArgumentEnv, _cache: &mut Cache) -> Vec { + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + _cache: &mut Cache, + ) -> Vec { let mut constraints = vec![]; // LOAD WITNESS diff --git a/kimchi/src/circuits/polynomials/keccak/witness.rs b/kimchi/src/circuits/polynomials/keccak/witness.rs index 0b416f9369..cc575d1f38 100644 --- a/kimchi/src/circuits/polynomials/keccak/witness.rs +++ b/kimchi/src/circuits/polynomials/keccak/witness.rs @@ -19,9 +19,9 @@ use super::{ RATE, }; -type Layout = Vec>>>; +type Layout = Vec, COLUMNS>>>; -fn layout_round() -> [Layout; 1] { +fn layout_round() -> [Layout; 1] { [vec![ IndexCell::create("state_a", 0, 100), IndexCell::create("shifts_c", 100, 180), @@ -41,7 +41,7 @@ fn layout_round() -> [Layout; 1] { ]] } -fn layout_sponge() -> [Layout; 1] { +fn layout_sponge() -> [Layout; 1] { [vec![ IndexCell::create("old_state", 0, 100), IndexCell::create("new_state", 100, 200), diff --git a/kimchi/src/circuits/polynomials/not.rs b/kimchi/src/circuits/polynomials/not.rs index 4db55e4d6d..8ac3435cd6 
100644 --- a/kimchi/src/circuits/polynomials/not.rs +++ b/kimchi/src/circuits/polynomials/not.rs @@ -3,7 +3,7 @@ //! Note that this module does not include a `Not` gate type. use crate::circuits::{ gate::{CircuitGate, Connect, GateType}, - polynomial::COLUMNS, + polynomial::KIMCHI_COLS, wires::Wire, }; use ark_ff::PrimeField; @@ -183,14 +183,14 @@ impl CircuitGate { /// Warning: /// - don't forget to set a row of the witness with public input `2^bits -1` and wire it to the second input of the first `Xor16` gate pub fn extend_not_witness_checked_length( - witness: &mut [Vec; COLUMNS], + witness: &mut [Vec; KIMCHI_COLS], input: F, bits: Option, ) { let input = input.to_biguint(); let output = BigUint::bitwise_not(&input, bits); let bits = max(input.bitlen(), bits.unwrap_or(0)); - let mut not_witness: [Vec; COLUMNS] = + let mut not_witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![F::zero(); num_xors(bits) + 1]); init_xor( &mut not_witness, @@ -203,7 +203,7 @@ pub fn extend_not_witness_checked_length( ), ); - for col in 0..COLUMNS { + for col in 0..KIMCHI_COLS { witness[col].extend(not_witness[col].iter()); } } @@ -215,7 +215,7 @@ pub fn extend_not_witness_checked_length( /// Warning: Set public input of bits in public generic gate /// Note: `witness[0][pub] = 2^bits - 1` pub fn extend_not_witness_unchecked_length( - witness: &mut [Vec; COLUMNS], + witness: &mut [Vec; KIMCHI_COLS], inputs: &[F], bits: usize, ) -> Result<(), String> { @@ -235,7 +235,7 @@ pub fn extend_not_witness_unchecked_length( } let all_ones = F::from(2u8).pow([bits as u64]) - F::one(); let rows = (inputs.len() as f64 / 2.0).ceil() as usize; - let mut not_witness: [Vec; COLUMNS] = array::from_fn(|_| vec![F::zero(); rows]); + let mut not_witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![F::zero(); rows]); for (i, input) in inputs.iter().enumerate().step_by(2) { let row = i / 2; // fill in first NOT @@ -253,7 +253,7 @@ pub fn extend_not_witness_unchecked_length( not_witness[5][row] = 
negated_input2; } } - for col in 0..COLUMNS { + for col in 0..KIMCHI_COLS { witness[col].extend(not_witness[col].iter()); } Ok(()) diff --git a/kimchi/src/circuits/polynomials/permutation.rs b/kimchi/src/circuits/polynomials/permutation.rs index 0301823b37..d2665a5dcb 100644 --- a/kimchi/src/circuits/polynomials/permutation.rs +++ b/kimchi/src/circuits/polynomials/permutation.rs @@ -57,66 +57,69 @@ use ark_poly::{ use ark_poly::{Polynomial, UVPolynomial}; use blake2::{Blake2b512, Digest}; use o1_utils::{ExtendedDensePolynomial, ExtendedEvaluations}; +use poly_commitment::OpenProof; use rand::{CryptoRng, RngCore}; use rayon::prelude::*; use std::array; /// Number of constraints produced by the argument. pub const CONSTRAINTS: u32 = 3; -pub const ZK_ROWS: u64 = 3; + /// Evaluates the polynomial -/// (x - w^{n - 4}) (x - w^{n - 3}) * (x - w^{n - 2}) * (x - w^{n - 1}) -pub fn eval_vanishes_on_last_4_rows(domain: D, x: F) -> F { - let w4 = domain.group_gen.pow([domain.size - (ZK_ROWS + 1)]); - let w3 = domain.group_gen * w4; - let w2 = domain.group_gen * w3; - let w1 = domain.group_gen * w2; - (x - w1) * (x - w2) * (x - w3) * (x - w4) +/// (x - w^{n - i}) * (x - w^{n - i + 1}) * ... * (x - w^{n - 1}) +pub fn eval_vanishes_on_last_n_rows(domain: D, i: u64, x: F) -> F { + if i == 0 { + return F::one(); + } + let mut term = domain.group_gen.pow([domain.size - i]); + let mut acc = x - term; + for _ in 0..i - 1 { + term *= domain.group_gen; + acc *= x - term; + } + acc } /// The polynomial -/// (x - w^{n - 4}) (x - w^{n - 3}) * (x - w^{n - 2}) * (x - w^{n - 1}) -pub fn vanishes_on_last_4_rows(domain: D) -> DensePolynomial { +/// (x - w^{n - i}) * (x - w^{n - i + 1}) * ... 
* (x - w^{n - 1}) +pub fn vanishes_on_last_n_rows(domain: D, i: u64) -> DensePolynomial { + let constant = |a: F| DensePolynomial::from_coefficients_slice(&[a]); + if i == 0 { + return constant(F::one()); + } let x = DensePolynomial::from_coefficients_slice(&[F::zero(), F::one()]); - let c = |a: F| DensePolynomial::from_coefficients_slice(&[a]); - let w4 = domain.group_gen.pow([domain.size - (ZK_ROWS + 1)]); - let w3 = domain.group_gen * w4; - let w2 = domain.group_gen * w3; - let w1 = domain.group_gen * w2; - &(&(&x - &c(w1)) * &(&x - &c(w2))) * &(&(&x - &c(w3)) * &(&x - &c(w4))) + let mut term = domain.group_gen.pow([domain.size - i]); + let mut acc = &x - &constant(term); + for _ in 0..i - 1 { + term *= domain.group_gen; + acc = &acc * &(&x - &constant(term)); + } + acc } /// Returns the end of the circuit, which is used for introducing zero-knowledge in the permutation polynomial -pub fn zk_w3(domain: D) -> F { - domain.group_gen.pow([domain.size - (ZK_ROWS)]) +pub fn zk_w(domain: D, zk_rows: u64) -> F { + domain.group_gen.pow([domain.size - zk_rows]) } /// Evaluates the polynomial -/// (x - w^{n - 3}) * (x - w^{n - 2}) * (x - w^{n - 1}) -pub fn eval_zk_polynomial(domain: D, x: F) -> F { - let w3 = zk_w3(domain); - let w2 = domain.group_gen * w3; - let w1 = domain.group_gen * w2; - (x - w1) * (x - w2) * (x - w3) +/// (x - w^{n - zk_rows}) * (x - w^{n - zk_rows + 1}) * (x - w^{n - 1}) +pub fn eval_permutation_vanishing_polynomial(domain: D, zk_rows: u64, x: F) -> F { + let term = domain.group_gen.pow([domain.size - zk_rows]); + (x - term) * (x - term * domain.group_gen) * (x - domain.group_gen.pow([domain.size - 1])) } -/// Computes the zero-knowledge polynomial for blinding the permutation polynomial: `(x-w^{n-k})(x-w^{n-k-1})...(x-w^n)`. 
-/// Currently, we use k = 3 for 2 blinding factors, -/// see -pub fn zk_polynomial(domain: D) -> DensePolynomial { - let w3 = zk_w3(domain); - let w2 = domain.group_gen * w3; - let w1 = domain.group_gen * w2; - - // (x-w3)(x-w2)(x-w1) = - // x^3 - x^2(w1+w2+w3) + x(w1w2+w1w3+w2w3) - w1w2w3 - let w1w2 = w1 * w2; - DensePolynomial::from_coefficients_slice(&[ - -w1w2 * w3, // 1 - w1w2 + (w1 * w3) + (w3 * w2), // x - -w1 - w2 - w3, // x^2 - F::one(), // x^3 - ]) +/// The polynomial +/// (x - w^{n - zk_rows}) * (x - w^{n - zk_rows + 1}) * (x - w^{n - 1}) +pub fn permutation_vanishing_polynomial( + domain: D, + zk_rows: u64, +) -> DensePolynomial { + let constant = |a: F| DensePolynomial::from_coefficients_slice(&[a]); + let x = DensePolynomial::from_coefficients_slice(&[F::zero(), F::one()]); + let term = domain.group_gen.pow([domain.size - zk_rows]); + &(&(&x - &constant(term)) * &(&x - &constant(term * domain.group_gen))) + * &(&x - &constant(domain.group_gen.pow([domain.size - 1]))) } /// Shifts represent the shifts required in the permutation argument of PLONK. 
@@ -191,7 +194,13 @@ where } } -impl> ProverIndex { +impl< + F: PrimeField, + G: KimchiCurve, + OpeningProof: OpenProof, + const COLUMNS: usize, + > ProverIndex +{ /// permutation quotient poly contribution computation /// /// # Errors @@ -204,7 +213,7 @@ impl> ProverIndex #[allow(clippy::type_complexity)] pub fn perm_quot( &self, - lagrange: &WitnessOverDomains, + lagrange: &WitnessOverDomains, beta: F, gamma: F, z: &DensePolynomial, @@ -214,6 +223,8 @@ impl> ProverIndex let alpha1 = alphas.next().expect("missing power of alpha"); let alpha2 = alphas.next().expect("missing power of alpha"); + let zk_rows = self.cs.zk_rows as usize; + // constant gamma in evaluation form (in domain d8) let gamma = &self.cs.precomputations().constant_1_d8.scale(gamma); @@ -275,7 +286,8 @@ impl> ProverIndex sigmas = &sigmas * &term; } - &(&shifts - &sigmas).scale(alpha0) * &self.cs.precomputations().zkpl + &(&shifts - &sigmas).scale(alpha0) + * &self.cs.precomputations().permutation_vanishing_polynomial_l }; //~ and `bnd`: @@ -301,9 +313,9 @@ impl> ProverIndex return Err(ProverError::Permutation("first division rest")); } - // accumulator end := (z(x) - 1) / (x - sid[n-3]) + // accumulator end := (z(x) - 1) / (x - sid[n-zk_rows]) let denominator = DensePolynomial::from_coefficients_slice(&[ - -self.cs.sid[self.cs.domain.d1.size() - 3], + -self.cs.sid[self.cs.domain.d1.size() - zk_rows], F::one(), ]); let (bnd2, res) = DenseOrSparsePolynomial::divide_with_q_and_r( @@ -324,7 +336,7 @@ impl> ProverIndex /// permutation linearization poly contribution computation pub fn perm_lnrz( &self, - e: &ProofEvaluations>, + e: &ProofEvaluations, COLUMNS>, zeta: F, beta: F, gamma: F, @@ -335,7 +347,11 @@ impl> ProverIndex //~ //~ $\text{scalar} \cdot \sigma_6(x)$ //~ - let zkpm_zeta = self.cs.precomputations().zkpm.evaluate(&zeta); + let zkpm_zeta = self + .cs + .precomputations() + .permutation_vanishing_polynomial_m + .evaluate(&zeta); let scalar = ConstraintSystem::::perm_scalars(e, beta, gamma, 
alphas, zkpm_zeta); let evals8 = &self.column_evaluations.permutation_coefficients8[PERMUTS - 1].evals; const STRIDE: usize = 8; @@ -349,8 +365,8 @@ impl> ProverIndex } impl ConstraintSystem { - pub fn perm_scalars( - e: &ProofEvaluations>, + pub fn perm_scalars( + e: &ProofEvaluations, COLUMNS>, beta: F, gamma: F, mut alphas: impl Iterator, @@ -390,7 +406,13 @@ impl ConstraintSystem { } } -impl> ProverIndex { +impl< + F: PrimeField, + G: KimchiCurve, + OpeningProof: OpenProof, + const COLUMNS: usize, + > ProverIndex +{ /// permutation aggregation polynomial computation /// /// # Errors @@ -402,13 +424,15 @@ impl> ProverIndex /// Will panic if `first element` is not 1. pub fn perm_aggreg( &self, - witness: &[Vec; W], + witness: &[Vec; COLUMNS], beta: &F, gamma: &F, rng: &mut (impl RngCore + CryptoRng), ) -> Result, ProverError> { let n = self.cs.domain.d1.size(); + let zk_rows = self.cs.zk_rows as usize; + // only works if first element is 1 assert_eq!(self.cs.domain.d1.elements().next(), Some(F::one())); @@ -453,7 +477,7 @@ impl> ProverIndex //~ \end{align} //~ $$ //~ - for j in 0..n - 3 { + for j in 0..n - 1 { z[j + 1] = witness .iter() .zip(self.column_evaluations.permutation_coefficients8.iter()) @@ -461,28 +485,30 @@ impl> ProverIndex .fold(F::one(), |x, y| x * y); } - ark_ff::fields::batch_inversion::(&mut z[1..=n - 3]); + ark_ff::fields::batch_inversion::(&mut z[1..n]); - for j in 0..n - 3 { - let x = z[j]; - z[j + 1] *= witness - .iter() - .zip(self.cs.shift.iter()) - .map(|(w, s)| w[j] + (self.cs.sid[j] * beta * s) + gamma) - .fold(x, |z, y| z * y); + //~ We randomize the evaluations at `n - zk_rows + 1` and `n - zk_rows + 2` in order to add + //~ zero-knowledge to the protocol. 
+ //~ + for j in 0..n - 1 { + if j != n - zk_rows && j != n - zk_rows + 1 { + let x = z[j]; + z[j + 1] *= witness + .iter() + .zip(self.cs.shift.iter()) + .map(|(w, s)| w[j] + (self.cs.sid[j] * beta * s) + gamma) + .fold(x, |z, y| z * y); + } else { + z[j + 1] = F::rand(rng); + } } - //~ If computed correctly, we should have $z(g^{n-3}) = 1$. + //~ For a valid witness, we then have have $z(g^{n-zk_rows}) = 1$. //~ - if z[n - 3] != F::one() { + if z[n - zk_rows] != F::one() { return Err(ProverError::Permutation("final value")); }; - //~ Finally, randomize the last `EVAL_POINTS` evaluations $z(g^{n-2})$ and $z(g^{n-1})$, - //~ in order to add zero-knowledge to the protocol. - z[n - 2] = F::rand(rng); - z[n - 1] = F::rand(rng); - let res = Evaluations::>::from_vec_and_domain(z, self.cs.domain.d1).interpolate(); Ok(res) } diff --git a/kimchi/src/circuits/polynomials/poseidon.rs b/kimchi/src/circuits/polynomials/poseidon.rs index ef5d010dc9..b690ce45fd 100644 --- a/kimchi/src/circuits/polynomials/poseidon.rs +++ b/kimchi/src/circuits/polynomials/poseidon.rs @@ -30,7 +30,7 @@ use crate::{ argument::{Argument, ArgumentEnv, ArgumentType}, expr::{constraints::ExprOps, Cache}, gate::{CircuitGate, CurrOrNext, GateType}, - polynomial::COLUMNS, + polynomial::KIMCHI_COLS, wires::{GateWires, Wire}, }, curve::KimchiCurve, @@ -51,7 +51,7 @@ use CurrOrNext::{Curr, Next}; pub const SPONGE_WIDTH: usize = PlonkSpongeConstantsKimchi::SPONGE_WIDTH; /// Number of rows -pub const ROUNDS_PER_ROW: usize = COLUMNS / SPONGE_WIDTH; +pub const ROUNDS_PER_ROW: usize = KIMCHI_COLS / SPONGE_WIDTH; /// Number of rounds pub const ROUNDS_PER_HASH: usize = PlonkSpongeConstantsKimchi::PERM_ROUNDS_FULL; @@ -137,11 +137,11 @@ impl CircuitGate { /// # Errors /// /// Will give error if `self.typ` is not `Poseidon` gate, or `state` does not match after `permutation`. 
- pub fn verify_poseidon>( + pub fn verify_poseidon, const COLUMNS: usize>( &self, row: usize, // TODO(mimoo): we should just pass two rows instead of the whole witness - witness: &[Vec; W], + witness: &[Vec; COLUMNS], ) -> Result<(), String> { ensure_eq!( self.typ, @@ -226,7 +226,7 @@ impl CircuitGate { pub fn generate_witness( row: usize, params: &'static ArithmeticSpongeParams, - witness_cols: &mut [Vec; COLUMNS], + witness_cols: &mut [Vec; KIMCHI_COLS], input: [F; SPONGE_WIDTH], ) { // add the input into the witness @@ -337,7 +337,10 @@ where const ARGUMENT_TYPE: ArgumentType = ArgumentType::Gate(GateType::Poseidon); const CONSTRAINTS: u32 = 15; - fn constraint_checks>(env: &ArgumentEnv, cache: &mut Cache) -> Vec { + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + cache: &mut Cache, + ) -> Vec { let mut res = vec![]; let mut idx = 0; diff --git a/kimchi/src/circuits/polynomials/range_check/circuitgates.rs b/kimchi/src/circuits/polynomials/range_check/circuitgates.rs index fd56e6514f..6e4b3def8e 100644 --- a/kimchi/src/circuits/polynomials/range_check/circuitgates.rs +++ b/kimchi/src/circuits/polynomials/range_check/circuitgates.rs @@ -123,7 +123,7 @@ use crate::circuits::{ Cache, }, gate::GateType, - polynomial::COLUMNS, + polynomial::KIMCHI_COLS, }; use ark_ff::PrimeField; @@ -178,7 +178,10 @@ where // * Operates on Curr row // * Range constrain all limbs except vp0 and vp1 (barring plookup constraints, which are done elsewhere) // * Constrain that combining all limbs equals the limb stored in column 0 - fn constraint_checks>(env: &ArgumentEnv, _cache: &mut Cache) -> Vec { + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + _cache: &mut Cache, + ) -> Vec { // 1) Apply range constraints on the limbs // * Columns 1-2 are 12-bit copy constraints // * They are copied 3 rows ahead (to the final row) and are constrained by lookups @@ -187,7 +190,7 @@ where // a single 64-bit range check // * Columns 3-6 are 12-bit plookup range 
constraints (these are specified in the lookup gate) // * Columns 7-14 are 2-bit crumb range constraints - let mut constraints = (7..COLUMNS) + let mut constraints = (7..KIMCHI_COLS) .map(|i| crumb(&env.witness_curr(i))) .collect::>(); @@ -205,7 +208,7 @@ where let mut sum_of_limbs = T::zero(); // Sum 2-bit limbs - for i in (7..COLUMNS).rev() { + for i in (7..KIMCHI_COLS).rev() { sum_of_limbs += power_of_2.clone() * env.witness_curr(i); power_of_2 *= T::from(4u64); // 2 bits } @@ -279,7 +282,10 @@ where // * Operates on Curr and Next row // * Range constrain all limbs (barring plookup constraints, which are done elsewhere) // * Constrain that combining all limbs equals the value v2 stored in row Curr, column 0 - fn constraint_checks>(env: &ArgumentEnv, _cache: &mut Cache) -> Vec { + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + _cache: &mut Cache, + ) -> Vec { // 1) Apply range constraints on limbs for Curr row // * Column 2 is a 2-bit crumb let mut constraints = vec![crumb(&env.witness_curr(2))]; @@ -288,7 +294,7 @@ where // in the lookup gate) // * Columns 7-14 are 2-bit crumb range constraints constraints.append( - &mut (7..COLUMNS) + &mut (7..KIMCHI_COLS) .map(|i| crumb(&env.witness_curr(i))) .collect::>(), ); @@ -304,7 +310,7 @@ where // are specified in the lookup gate) // * Columns 7-14 are more 2-bit crumbs constraints.append( - &mut (7..COLUMNS) + &mut (7..KIMCHI_COLS) .map(|i| crumb(&env.witness_next(i))) .collect::>(), ); @@ -325,7 +331,7 @@ where let mut sum_of_limbs = T::zero(); // Next row: Sum 2-bit limbs - for i in (7..COLUMNS).rev() { + for i in (7..KIMCHI_COLS).rev() { sum_of_limbs += power_of_2.clone() * env.witness_next(i); power_of_2 *= 4u64.into(); // 2 bits } @@ -337,7 +343,7 @@ where } // Curr row: Sum 2-bit limbs - for i in (7..COLUMNS).rev() { + for i in (7..KIMCHI_COLS).rev() { sum_of_limbs += power_of_2.clone() * env.witness_curr(i); power_of_2 *= 4u64.into(); // 2 bits } diff --git 
a/kimchi/src/circuits/polynomials/range_check/gadget.rs b/kimchi/src/circuits/polynomials/range_check/gadget.rs index 3b787b0ff2..0a18b2b2b4 100644 --- a/kimchi/src/circuits/polynomials/range_check/gadget.rs +++ b/kimchi/src/circuits/polynomials/range_check/gadget.rs @@ -149,7 +149,7 @@ pub fn circuit_gate_constraints( } /// Get the combined constraints for all range check circuit gate types -pub fn combined_constraints( +pub fn combined_constraints( alphas: &Alphas, cache: &mut Cache, ) -> E { diff --git a/kimchi/src/circuits/polynomials/range_check/witness.rs b/kimchi/src/circuits/polynomials/range_check/witness.rs index e6fd60987f..d970338818 100644 --- a/kimchi/src/circuits/polynomials/range_check/witness.rs +++ b/kimchi/src/circuits/polynomials/range_check/witness.rs @@ -11,7 +11,7 @@ use crate::circuits::witness::Variables; use crate::variable_map; use crate::{ circuits::{ - polynomial::COLUMNS, + polynomial::KIMCHI_COLS, witness::{init_row, CopyBitsCell, CopyCell, VariableCell, WitnessCell}, }, variables, @@ -29,7 +29,7 @@ use o1_utils::foreign_field::BigUintForeignFieldHelpers; /// For example, we can convert the `RangeCheck0` circuit gate into /// a 64-bit lookup by adding two copy constraints to constrain /// columns 1 and 2 to zero. 
-fn layout() -> [Vec>>; 4] { +fn layout() -> [Vec>>; 4] { [ /* row 1, RangeCheck0 row */ range_check_0_row("v0", 0), @@ -86,7 +86,7 @@ fn layout() -> [Vec>>; 4] { pub fn range_check_0_row( limb_name: &'static str, row: usize, -) -> Vec>> { +) -> Vec>> { vec![ VariableCell::create(limb_name), /* 12-bit copies */ @@ -114,9 +114,9 @@ pub fn range_check_0_row( } /// Create a multi range check witness from three 88-bit values: v0, v1 and v2 -pub fn create_multi(v0: F, v1: F, v2: F) -> [Vec; COLUMNS] { +pub fn create_multi(v0: F, v1: F, v2: F) -> [Vec; KIMCHI_COLS] { let layout = layout(); - let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![F::zero(); 4]); + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![F::zero(); 4]); init_row(&mut witness, 0, 0, &layout, &variables!(v0)); init_row(&mut witness, 0, 1, &layout, &variables!(v1)); @@ -134,9 +134,9 @@ pub fn create_multi(v0: F, v1: F, v2: F) -> [Vec; COLUMNS] { /// Create a multi range check witness from two limbs: v01 (176 bits), v2 (88 bits), /// where v2 is the most significant limb and v01 is the least significant limb -pub fn create_multi_compact(v01: F, v2: F) -> [Vec; COLUMNS] { +pub fn create_multi_compact(v01: F, v2: F) -> [Vec; KIMCHI_COLS] { let layout = layout(); - let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![F::zero(); 4]); + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![F::zero(); 4]); let (v1, v0) = v01.to_biguint().div_rem(&BigUint::two_to_limb()); let v0: F = v0.to_field().expect("failed to convert to field element"); @@ -158,20 +158,20 @@ pub fn create_multi_compact(v01: F, v2: F) -> [Vec; COLUMNS] { } /// Create a multi range check witness from limbs -pub fn create_multi_limbs(limbs: &[F; 3]) -> [Vec; COLUMNS] { +pub fn create_multi_limbs(limbs: &[F; 3]) -> [Vec; KIMCHI_COLS] { create_multi(limbs[0], limbs[1], limbs[2]) } /// Create a multi range check witness from compact limbs -pub fn create_multi_compact_limbs(limbs: &[F; 2]) -> [Vec; COLUMNS] { +pub 
fn create_multi_compact_limbs(limbs: &[F; 2]) -> [Vec; KIMCHI_COLS] { create_multi_compact(limbs[0], limbs[1]) } /// Create a single range check witness /// Input: 88-bit value v0 -pub fn create(v0: F) -> [Vec; COLUMNS] { +pub fn create(v0: F) -> [Vec; KIMCHI_COLS] { let layout = vec![range_check_0_row("v0", 0)]; - let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![F::zero()]); + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![F::zero()]); init_row(&mut witness, 0, 0, &layout, &variables!(v0)); @@ -179,58 +179,61 @@ pub fn create(v0: F) -> [Vec; COLUMNS] { } /// Extend an existing witness with a multi-range-check gadget for three 88-bit values: v0, v1 and v2 -pub fn extend_multi(witness: &mut [Vec; COLUMNS], v0: F, v1: F, v2: F) { +pub fn extend_multi(witness: &mut [Vec; KIMCHI_COLS], v0: F, v1: F, v2: F) { let limbs_witness = create_multi(v0, v1, v2); - for col in 0..COLUMNS { + for col in 0..KIMCHI_COLS { witness[col].extend(limbs_witness[col].iter()) } } /// Extend and existing witness with a multi range check witness for two limbs: v01 (176 bits), v2 (88 bits), /// where v2 is the most significant limb and v01 is the least significant limb -pub fn extend_multi_compact(witness: &mut [Vec; COLUMNS], v01: F, v2: F) { +pub fn extend_multi_compact(witness: &mut [Vec; KIMCHI_COLS], v01: F, v2: F) { let limbs_witness = create_multi_compact(v01, v2); - for col in 0..COLUMNS { + for col in 0..KIMCHI_COLS { witness[col].extend(limbs_witness[col].iter()) } } /// Extend an existing witness with a multi-range-check gadget for limbs -pub fn extend_multi_limbs(witness: &mut [Vec; COLUMNS], limbs: &[F; 3]) { +pub fn extend_multi_limbs(witness: &mut [Vec; KIMCHI_COLS], limbs: &[F; 3]) { let limbs_witness = create_multi_limbs(limbs); - for col in 0..COLUMNS { + for col in 0..KIMCHI_COLS { witness[col].extend(limbs_witness[col].iter()) } } /// Extend an existing witness with a multi-range-check gadget for compact limbs -pub fn 
extend_multi_compact_limbs(witness: &mut [Vec; COLUMNS], limbs: &[F; 2]) { +pub fn extend_multi_compact_limbs( + witness: &mut [Vec; KIMCHI_COLS], + limbs: &[F; 2], +) { let limbs_witness = create_multi_compact_limbs(limbs); - for col in 0..COLUMNS { + for col in 0..KIMCHI_COLS { witness[col].extend(limbs_witness[col].iter()) } } /// Extend an existing witness with a multi-range-check gadget for ForeignElement pub fn extend_multi_from_fe( - witness: &mut [Vec; COLUMNS], + witness: &mut [Vec; KIMCHI_COLS], fe: &ForeignElement, ) { extend_multi(witness, fe.limbs[0], fe.limbs[1], fe.limbs[2]); } /// Extend an existing witness with a single range check witness for foreign field element -pub fn extend(witness: &mut [Vec; COLUMNS], fe: F) { +pub fn extend(witness: &mut [Vec; KIMCHI_COLS], fe: F) { let limbs_witness = create(fe); - for col in 0..COLUMNS { + for col in 0..KIMCHI_COLS { witness[col].extend(limbs_witness[col].iter()) } } /// Extend an existing witness with a single-range-check gate for 88bits -pub fn extend_single(witness: &mut [Vec; COLUMNS], elem: F) { +pub fn extend_single(witness: &mut [Vec; KIMCHI_COLS], elem: F) { let single_wit = create(elem); - for col in 0..COLUMNS { + for col in 0..KIMCHI_COLS { witness[col].extend(single_wit[col].iter()) } } diff --git a/kimchi/src/circuits/polynomials/rot.rs b/kimchi/src/circuits/polynomials/rot.rs index a0bc442a24..e1ef1e4acb 100644 --- a/kimchi/src/circuits/polynomials/rot.rs +++ b/kimchi/src/circuits/polynomials/rot.rs @@ -13,7 +13,7 @@ use crate::{ self, tables::{GateLookupTable, LookupTable}, }, - polynomial::COLUMNS, + polynomial::KIMCHI_COLS, wires::Wire, witness::{self, VariableBitsCell, VariableCell, Variables, WitnessCell}, }, @@ -213,10 +213,13 @@ where // (stored in coefficient as a power-of-two form) // * Operates on Curr row // * Shifts the words by `rot` bits and then adds the excess to obtain the rotated word. 
- fn constraint_checks>(env: &ArgumentEnv, _cache: &mut Cache) -> Vec { + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + _cache: &mut Cache, + ) -> Vec { // Check that the last 8 columns are 2-bit crumbs // C1..C8: x * (x - 1) * (x - 2) * (x - 3) = 0 - let mut constraints = (7..COLUMNS) + let mut constraints = (7..KIMCHI_COLS) .map(|i| crumb(&env.witness_curr(i))) .collect::>(); @@ -244,7 +247,7 @@ where let mut bound = T::zero(); // Sum 2-bit limbs - for i in (7..COLUMNS).rev() { + for i in (7..KIMCHI_COLS).rev() { bound += power_of_2.clone() * env.witness_curr(i); power_of_2 *= T::two_pow(2); // 2 bits } @@ -266,7 +269,7 @@ where // ROTATION WITNESS COMPUTATION -fn layout_rot64(curr_row: usize) -> [Vec>>; 3] { +fn layout_rot64(curr_row: usize) -> [Vec>>; 3] { [ rot_row(), range_check_0_row("shifted", curr_row + 1), @@ -274,7 +277,7 @@ fn layout_rot64(curr_row: usize) -> [Vec() -> Vec>> { +fn rot_row() -> Vec>> { vec![ VariableCell::create("word"), VariableCell::create("rotated"), @@ -297,7 +300,7 @@ fn rot_row() -> Vec>> { } fn init_rot64( - witness: &mut [Vec; COLUMNS], + witness: &mut [Vec; KIMCHI_COLS], curr_row: usize, word: F, rotated: F, @@ -323,13 +326,13 @@ fn init_rot64( /// Warning: /// - don't forget to include a public input row with zero value pub fn extend_rot( - witness: &mut [Vec; COLUMNS], + witness: &mut [Vec; KIMCHI_COLS], word: u64, rot: u32, side: RotMode, ) { - assert!(rot < 64, "Rotation value must be less than 64"); - assert_ne!(rot, 0, "Rotation value must be non-zero"); + assert!(rot <= 64, "Rotation value must be less or equal than 64"); + let rot = if side == RotMode::Right { 64 - rot } else { @@ -343,16 +346,16 @@ pub fn extend_rot( // shifted [------] * 2^rot // rot = [------|000] // + [---] excess - let shifted = (word as u128 * 2u128.pow(rot) % 2u128.pow(64)) as u64; - let excess = word / 2u64.pow(64 - rot); + let shifted = (word as u128) * 2u128.pow(rot) % 2u128.pow(64); + let excess = (word as u128) / 
2u128.pow(64 - rot); let rotated = shifted + excess; // Value for the added value for the bound // Right input of the "FFAdd" for the bound equation let bound = 2u128.pow(64) - 2u128.pow(rot); let rot_row = witness[0].len(); - let rot_witness: [Vec; COLUMNS] = array::from_fn(|_| vec![F::zero(); 3]); - for col in 0..COLUMNS { + let rot_witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![F::zero(); 3]); + for col in 0..KIMCHI_COLS { witness[col].extend(rot_witness[col].iter()); } init_rot64( diff --git a/kimchi/src/circuits/polynomials/turshi.rs b/kimchi/src/circuits/polynomials/turshi.rs index abb0a7266f..6a8a76fd85 100644 --- a/kimchi/src/circuits/polynomials/turshi.rs +++ b/kimchi/src/circuits/polynomials/turshi.rs @@ -82,8 +82,9 @@ use crate::{ alphas::Alphas, circuits::{ argument::{Argument, ArgumentEnv, ArgumentType}, + berkeley_columns::Column, constraints::ConstraintSystem, - expr::{self, constraints::ExprOps, Cache, Column, E}, + expr::{self, constraints::ExprOps, Cache, E}, gate::{CircuitGate, GateType}, wires::{GateWires, Wire}, }, @@ -176,15 +177,15 @@ impl CircuitGate { /// # Panics /// /// Will panic if `constraint linearization` fails. 
- pub fn verify_cairo_gate>( + pub fn verify_cairo_gate, const COLUMNS: usize>( &self, row: usize, - witness: &[Vec; W], + witness: &[Vec; COLUMNS], cs: &ConstraintSystem, ) -> Result<(), String> { // assignments - let curr: [F; W] = array::from_fn(|i| witness[i][row]); - let mut next: [F; W] = array::from_fn(|_| F::zero()); + let curr: [F; COLUMNS] = array::from_fn(|i| witness[i][row]); + let mut next: [F; COLUMNS] = array::from_fn(|_| F::zero()); if self.typ != GateType::Zero { next = array::from_fn(|i| witness[i][row + 1]); } @@ -192,7 +193,7 @@ impl CircuitGate { // column polynomials let polys = { let mut h = std::collections::HashSet::new(); - for i in 0..W { + for i in 0..COLUMNS { h.insert(Column::Witness(i)); // column witness polynomials } // gate selector polynomials @@ -208,8 +209,11 @@ impl CircuitGate { alphas.register(ArgumentType::Gate(self.typ), Instruction::::CONSTRAINTS); // Get constraints for this circuit gate - let constraints = - circuit_gate_combined_constraints::(self.typ, &alphas, &mut Cache::default()); + let constraints = circuit_gate_combined_constraints::( + self.typ, + &alphas, + &mut Cache::default(), + ); // Linearize let linearized = constraints.linearize(polys).unwrap(); @@ -226,6 +230,7 @@ impl CircuitGate { joint_combiner: None, endo_coefficient: cs.endo, mds: &G::sponge_params().mds, + zk_rows: 3, }; let pt = F::rand(rng); @@ -254,7 +259,9 @@ pub mod witness { use super::*; /// Returns the witness of an execution of a Cairo program in `CircuitGate` format - pub fn cairo_witness(prog: &CairoProgram) -> [Vec; W] { + pub fn cairo_witness( + prog: &CairoProgram, + ) -> [Vec; COLUMNS] { // 0: 1 row for final check CairoClaim gate // 4i+1: 1 row per instruction for CairoInstruction gate // 4i+2: 1 row per instruction for Flags argument @@ -266,34 +273,34 @@ pub mod witness { let n = prog.trace().len(); let rows = 4 * n - 1; let mut table: Vec> = vec![vec![]]; - table.resize(rows, vec![F::zero(); W]); + table.resize(rows, 
vec![F::zero(); COLUMNS]); for (i, inst) in prog.trace().iter().enumerate() { if i == 0 { let claim_wit = claim_witness(prog); table[i] = claim_wit; } - let ins_wit = instruction_witness::(inst); - let flg_wit = flag_witness::(inst); + let ins_wit = instruction_witness::(inst); + let flg_wit = flag_witness::(inst); table[4 * i + 1] = ins_wit; table[4 * i + 2] = flg_wit; if i != n - 1 { // all but last instruction - let tra_wit = transition_witness::(inst, &prog.trace()[i + 1]); - let aux_wit = auxiliary_witness::(&prog.trace()[i + 1]); + let tra_wit = transition_witness::(inst, &prog.trace()[i + 1]); + let aux_wit = auxiliary_witness::(&prog.trace()[i + 1]); table[4 * i + 3] = tra_wit; table[4 * i + 4] = aux_wit; } } - let mut witness: Vec> = vec![vec![]; W]; - for col in 0..W { + let mut witness: Vec> = vec![vec![]; COLUMNS]; + for col in 0..COLUMNS { // initialize column with zeroes witness[col].resize(table.len(), F::zero()); for (row, wit) in table.iter().enumerate() { witness[col][row] = wit[col]; } } - let witness: [Vec; W] = array::from_fn(|i| witness[i].clone()); + let witness: [Vec; COLUMNS] = array::from_fn(|i| witness[i].clone()); witness } @@ -318,7 +325,7 @@ pub mod witness { ] } - fn instruction_witness(inst: &CairoInstruction) -> Vec { + fn instruction_witness(inst: &CairoInstruction) -> Vec { vec![ inst.pc(), inst.ap(), @@ -338,7 +345,7 @@ pub mod witness { ] } - fn flag_witness(inst: &CairoInstruction) -> Vec { + fn flag_witness(inst: &CairoInstruction) -> Vec { vec![ inst.f_dst_fp(), inst.f_op0_fp(), @@ -358,7 +365,7 @@ pub mod witness { ] } - fn transition_witness( + fn transition_witness( curr: &CairoInstruction, next: &CairoInstruction, ) -> Vec { @@ -381,7 +388,7 @@ pub mod witness { ] } - fn auxiliary_witness(next: &CairoInstruction) -> Vec { + fn auxiliary_witness(next: &CairoInstruction) -> Vec { vec![ next.pc(), next.ap(), @@ -410,30 +417,30 @@ pub mod testing { /// # Errors /// /// Will give error if `gate` is not `Cairo`-related gate or 
`zero` gate. - pub fn ensure_cairo_gate( + pub fn ensure_cairo_gate( gate: &CircuitGate, row: usize, - witness: &[Vec; W], + witness: &[Vec; COLUMNS], //_cs: &ConstraintSystem, ) -> Result<(), String> { // assignments - let this: [F; W] = array::from_fn(|i| witness[i][row]); + let this: [F; COLUMNS] = array::from_fn(|i| witness[i][row]); match gate.typ { GateType::CairoClaim => { - let next: [F; W] = array::from_fn(|i| witness[i][row + 1]); + let next: [F; COLUMNS] = array::from_fn(|i| witness[i][row + 1]); ensure_claim(&this, &next) // CircuitGate::ensure_transition(&this), } GateType::CairoInstruction => { - let next: [F; W] = array::from_fn(|i| witness[i][row + 1]); + let next: [F; COLUMNS] = array::from_fn(|i| witness[i][row + 1]); ensure_instruction(&this, &next) } GateType::CairoFlags => { - let next: [F; W] = array::from_fn(|i| witness[i][row + 1]); + let next: [F; COLUMNS] = array::from_fn(|i| witness[i][row + 1]); ensure_flags(&this, &next) } GateType::CairoTransition => { - let next: [F; W] = array::from_fn(|i| witness[i][row + 1]); + let next: [F; COLUMNS] = array::from_fn(|i| witness[i][row + 1]); ensure_transition(&this, &next) } GateType::Zero => Ok(()), @@ -739,7 +746,7 @@ fn two>() -> T { /// # Panics /// /// Will panic if the `typ` is not `Cairo`-related gate type or `zero` gate type. 
-pub fn circuit_gate_combined_constraints( +pub fn circuit_gate_combined_constraints( typ: GateType, alphas: &Alphas, cache: &mut Cache, @@ -765,7 +772,10 @@ where /// Generates the constraints for the Cairo initial claim and first memory checks /// Accesses Curr and Next rows - fn constraint_checks>(env: &ArgumentEnv, _cache: &mut Cache) -> Vec { + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + _cache: &mut Cache, + ) -> Vec { let pc_ini = env.witness_curr(0); // copy from public input let ap_ini = env.witness_curr(1); // copy from public input let pc_fin = env.witness_curr(2); // copy from public input @@ -802,7 +812,10 @@ where /// Generates the constraints for the Cairo instruction /// Accesses Curr and Next rows - fn constraint_checks>(env: &ArgumentEnv, cache: &mut Cache) -> Vec { + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + cache: &mut Cache, + ) -> Vec { // load all variables of the witness corresponding to Cairoinstruction gates let pc = env.witness_curr(0); let ap = env.witness_curr(1); @@ -948,7 +961,10 @@ where /// Generates the constraints for the Cairo flags /// Accesses Curr and Next rows - fn constraint_checks>(env: &ArgumentEnv, _cache: &mut Cache) -> Vec { + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + _cache: &mut Cache, + ) -> Vec { // Load current row let f_pc_abs = env.witness_curr(7); let f_pc_rel = env.witness_curr(8); @@ -1015,7 +1031,10 @@ where /// Generates the constraints for the Cairo transition /// Accesses Curr and Next rows (Next only first 3 entries) - fn constraint_checks>(env: &ArgumentEnv, _cache: &mut Cache) -> Vec { + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + _cache: &mut Cache, + ) -> Vec { // load computed updated registers let pcup = env.witness_curr(7); let apup = env.witness_curr(8); diff --git a/kimchi/src/circuits/polynomials/varbasemul.rs b/kimchi/src/circuits/polynomials/varbasemul.rs index 322596cd1f..9f9e524c0b 100644 --- 
a/kimchi/src/circuits/polynomials/varbasemul.rs +++ b/kimchi/src/circuits/polynomials/varbasemul.rs @@ -12,7 +12,8 @@ use crate::circuits::{ argument::{Argument, ArgumentEnv, ArgumentType}, - expr::{constraints::ExprOps, Cache, Column, Variable}, + berkeley_columns::Column, + expr::{constraints::ExprOps, Cache, Variable as VariableGen}, gate::{CircuitGate, CurrOrNext, GateType}, wires::GateWires, }; @@ -20,6 +21,8 @@ use ark_ff::{FftField, PrimeField}; use std::marker::PhantomData; use CurrOrNext::{Curr, Next}; +type Variable = VariableGen; + //~ We implement custom Plonk constraints for short Weierstrass curve variable base scalar multiplication. //~ //~ Given a finite field $\mathbb{F}_q$ of order $q$, if the order is not a multiple of 2 nor 3, then an @@ -142,10 +145,10 @@ impl CircuitGate { /// # Errors /// /// TODO - pub fn verify_vbmul( + pub fn verify_vbmul( &self, _row: usize, - _witness: &[Vec; W], + _witness: &[Vec; COLUMNS], ) -> Result<(), String> { // TODO: implement Ok(()) @@ -173,12 +176,15 @@ impl Point { } impl Point { - pub fn new_from_env>(&self, env: &ArgumentEnv) -> Point { + pub fn new_from_env, const COLUMNS: usize>( + &self, + env: &ArgumentEnv, + ) -> Point { Point::create(self.x.new_from_env(env), self.y.new_from_env(env)) } } -fn set(w: &mut [Vec; W], row0: usize, var: Variable, x: F) { +fn set(w: &mut [Vec; COLUMNS], row0: usize, var: Variable, x: F) { match var.col { Column::Witness(i) => w[i][row0 + var.row.shift()] = x, _ => panic!("Can only set witness columns"), @@ -186,8 +192,8 @@ fn set(w: &mut [Vec; W], row0: usize, var: Variable, x: F) } #[allow(clippy::too_many_arguments)] -fn single_bit_witness( - w: &mut [Vec; W], +fn single_bit_witness( + w: &mut [Vec; COLUMNS], row: usize, b: Variable, base: &Point, @@ -284,7 +290,7 @@ trait FromWitness where F: PrimeField, { - fn new_from_env(&self, env: &ArgumentEnv) -> T; + fn new_from_env(&self, env: &ArgumentEnv) -> T; } impl FromWitness for Variable @@ -292,7 +298,7 @@ where F: 
PrimeField, T: ExprOps, { - fn new_from_env(&self, env: &ArgumentEnv) -> T { + fn new_from_env(&self, env: &ArgumentEnv) -> T { let column_to_index = |_| match self.col { Column::Witness(i) => i, _ => panic!("Can't get index from witness columns"), @@ -328,7 +334,10 @@ impl Layout { } } - fn new_from_env>(&self, env: &ArgumentEnv) -> Layout { + fn new_from_env, const COLUMNS: usize>( + &self, + env: &ArgumentEnv, + ) -> Layout { Layout { accs: self.accs.map(|point| point.new_from_env(env)), bits: self.bits.map(|var| var.new_from_env(env)), @@ -361,8 +370,8 @@ pub struct VarbaseMulResult { /// # Panics /// /// Will panic if `bits chunk` length validation fails. -pub fn witness( - w: &mut [Vec; W], +pub fn witness( + w: &mut [Vec; COLUMNS], row0: usize, base: (F, F), bits: &[bool], @@ -411,7 +420,10 @@ where const ARGUMENT_TYPE: ArgumentType = ArgumentType::Gate(GateType::VarBaseMul); const CONSTRAINTS: u32 = 21; - fn constraint_checks>(env: &ArgumentEnv, cache: &mut Cache) -> Vec { + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + cache: &mut Cache, + ) -> Vec { let Layout { base, accs, @@ -419,7 +431,7 @@ where ss, n_prev, n_next, - } = Layout::create().new_from_env::(env); + } = Layout::create().new_from_env::(env); // n' // = 2^5 * n + 2^4 b0 + 2^3 b1 + 2^2 b2 + 2^1 b3 + b4 diff --git a/kimchi/src/circuits/polynomials/xor.rs b/kimchi/src/circuits/polynomials/xor.rs index 905a7a90ab..630df1d2c9 100644 --- a/kimchi/src/circuits/polynomials/xor.rs +++ b/kimchi/src/circuits/polynomials/xor.rs @@ -149,7 +149,10 @@ where // * Operates on Curr and Next rows // * Constrain the decomposition of `in1`, `in2` and `out` of multiples of 16 bits // * The actual XOR is performed thanks to the plookups of 4-bit XORs. 
- fn constraint_checks>(env: &ArgumentEnv, _cache: &mut Cache) -> Vec { + fn constraint_checks, const COLUMNS: usize>( + env: &ArgumentEnv, + _cache: &mut Cache, + ) -> Vec { let two = T::from(2u64); // in1 = in1_0 + in1_1 * 2^4 + in1_2 * 2^8 + in1_3 * 2^12 + next_in1 * 2^16 // in2 = in2_0 + in2_1 * 2^4 + in2_2 * 2^8 + in2_3 * 2^12 + next_in2 * 2^16 @@ -168,10 +171,10 @@ where } // Witness layout -fn layout( +fn layout( curr_row: usize, bits: usize, -) -> Vec>>> { +) -> Vec>>> { let num_xor = num_xors(bits); let mut layout = (0..num_xor) .map(|i| xor_row(i, curr_row + i)) @@ -180,10 +183,10 @@ fn layout( layout } -fn xor_row( +fn xor_row( nybble: usize, curr_row: usize, -) -> Vec>> { +) -> Vec>> { let start = nybble * 16; vec![ VariableBitsCell::create("in1", start, None), @@ -204,7 +207,7 @@ fn xor_row( ] } -fn zero_row() -> Vec>> { +fn zero_row() -> Vec>> { vec![ ConstantCell::create(F::zero()), ConstantCell::create(F::zero()), @@ -224,8 +227,8 @@ fn zero_row() -> Vec ] } -pub(crate) fn init_xor( - witness: &mut [Vec; W], +pub(crate) fn init_xor( + witness: &mut [Vec; COLUMNS], curr_row: usize, bits: usize, words: (F, F, F), @@ -242,14 +245,14 @@ pub(crate) fn init_xor( /// Extends the Xor rows to the full witness /// Panics if the words are larger than the desired bits -pub fn extend_xor_witness( - witness: &mut [Vec; W], +pub fn extend_xor_witness( + witness: &mut [Vec; COLUMNS], input1: F, input2: F, bits: usize, ) { - let xor_witness = create_xor_witness::(input1, input2, bits); - for col in 0..W { + let xor_witness = create_xor_witness::(input1, input2, bits); + for col in 0..COLUMNS { witness[col].extend(xor_witness[col].iter()); } } @@ -257,11 +260,11 @@ pub fn extend_xor_witness( /// Create a Xor for up to the native length starting at row 0 /// Input: first input and second input, bits length, current row /// Panics if the desired bits is smaller than the inputs length -pub fn create_xor_witness( +pub fn create_xor_witness( input1: F, input2: F, bits: 
usize, -) -> [Vec; W] { +) -> [Vec; COLUMNS] { let input1_big = input1.to_biguint(); let input2_big = input2.to_biguint(); if bits < input1_big.bitlen() || bits < input2_big.bitlen() { @@ -269,7 +272,8 @@ pub fn create_xor_witness( } let output = BigUint::bitwise_xor(&input1_big, &input2_big); - let mut xor_witness: [Vec; W] = array::from_fn(|_| vec![F::zero(); 1 + num_xors(bits)]); + let mut xor_witness: [Vec; COLUMNS] = + array::from_fn(|_| vec![F::zero(); 1 + num_xors(bits)]); init_xor( &mut xor_witness, diff --git a/kimchi/src/circuits/wires.rs b/kimchi/src/circuits/wires.rs index 1ab40d4b83..ff8c258078 100644 --- a/kimchi/src/circuits/wires.rs +++ b/kimchi/src/circuits/wires.rs @@ -6,13 +6,13 @@ use std::array; use std::io::{Read, Result as IoResult, Write}; /// Number of registers -pub const COLUMNS: usize = 15; +pub const KIMCHI_COLS: usize = 15; /// Number of registers that can be wired (participating in the permutation) pub const PERMUTS: usize = 7; /// index of all registers -pub const WIRES: [usize; COLUMNS] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]; +pub const WIRES: [usize; KIMCHI_COLS] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]; /// Wire documents the other cell that is wired to this one. 
/// If the cell represents an internal wire, an input to the circuit, diff --git a/kimchi/src/circuits/witness/constant_cell.rs b/kimchi/src/circuits/witness/constant_cell.rs index ea14b5de8c..03f333ced1 100644 --- a/kimchi/src/circuits/witness/constant_cell.rs +++ b/kimchi/src/circuits/witness/constant_cell.rs @@ -13,8 +13,13 @@ impl ConstantCell { } } -impl WitnessCell for ConstantCell { - fn value(&self, _witness: &mut [Vec; W], _variables: &Variables, _index: usize) -> F { +impl WitnessCell for ConstantCell { + fn value( + &self, + _witness: &mut [Vec; COLUMNS], + _variables: &Variables, + _index: usize, + ) -> F { self.value } } diff --git a/kimchi/src/circuits/witness/copy_bits_cell.rs b/kimchi/src/circuits/witness/copy_bits_cell.rs index 964e2f26bf..66a8a4f099 100644 --- a/kimchi/src/circuits/witness/copy_bits_cell.rs +++ b/kimchi/src/circuits/witness/copy_bits_cell.rs @@ -23,8 +23,13 @@ impl CopyBitsCell { } } -impl WitnessCell for CopyBitsCell { - fn value(&self, witness: &mut [Vec; W], _variables: &Variables, _index: usize) -> F { +impl WitnessCell for CopyBitsCell { + fn value( + &self, + witness: &mut [Vec; COLUMNS], + _variables: &Variables, + _index: usize, + ) -> F { F::from_bits(&witness[self.col][self.row].to_bits()[self.start..self.end]) .expect("failed to deserialize field bits for copy bits cell") } diff --git a/kimchi/src/circuits/witness/copy_cell.rs b/kimchi/src/circuits/witness/copy_cell.rs index ffa8339094..87784943f1 100644 --- a/kimchi/src/circuits/witness/copy_cell.rs +++ b/kimchi/src/circuits/witness/copy_cell.rs @@ -15,8 +15,13 @@ impl CopyCell { } } -impl WitnessCell for CopyCell { - fn value(&self, witness: &mut [Vec; W], _variables: &Variables, _index: usize) -> F { +impl WitnessCell for CopyCell { + fn value( + &self, + witness: &mut [Vec; COLUMNS], + _variables: &Variables, + _index: usize, + ) -> F { witness[self.col][self.row] } } diff --git a/kimchi/src/circuits/witness/copy_shift_cell.rs 
b/kimchi/src/circuits/witness/copy_shift_cell.rs index b0ed5d055a..f194e2f797 100644 --- a/kimchi/src/circuits/witness/copy_shift_cell.rs +++ b/kimchi/src/circuits/witness/copy_shift_cell.rs @@ -15,8 +15,13 @@ impl CopyShiftCell { } } -impl WitnessCell for CopyShiftCell { - fn value(&self, witness: &mut [Vec; W], _variables: &Variables, _index: usize) -> F { +impl WitnessCell for CopyShiftCell { + fn value( + &self, + witness: &mut [Vec; COLUMNS], + _variables: &Variables, + _index: usize, + ) -> F { F::from(2u32).pow([self.shift]) * witness[self.col][self.row] } } diff --git a/kimchi/src/circuits/witness/index_cell.rs b/kimchi/src/circuits/witness/index_cell.rs index 9d6ebefea5..f8fe8cd65a 100644 --- a/kimchi/src/circuits/witness/index_cell.rs +++ b/kimchi/src/circuits/witness/index_cell.rs @@ -18,8 +18,13 @@ impl<'a> IndexCell<'a> { } } -impl<'a, const W: usize, F: Field> WitnessCell> for IndexCell<'a> { - fn value(&self, _witness: &mut [Vec; W], variables: &Variables>, index: usize) -> F { +impl<'a, F: Field, const COLUMNS: usize> WitnessCell, COLUMNS> for IndexCell<'a> { + fn value( + &self, + _witness: &mut [Vec; COLUMNS], + variables: &Variables>, + index: usize, + ) -> F { assert!(index < self.length, "index out of bounds of `IndexCell`"); variables[self.name][index] } diff --git a/kimchi/src/circuits/witness/mod.rs b/kimchi/src/circuits/witness/mod.rs index 8dad3a2a40..ddee4bb9ea 100644 --- a/kimchi/src/circuits/witness/mod.rs +++ b/kimchi/src/circuits/witness/mod.rs @@ -20,10 +20,13 @@ pub use self::{ variables::{variable_map, variables, Variables}, }; -/// Witness cell interface -pub trait WitnessCell { - fn value(&self, witness: &mut [Vec; W], variables: &Variables, index: usize) -> F; +use super::polynomial::KIMCHI_COLS; +/// Witness cell interface. By default, the witness cell is a single element of type F. 
+pub trait WitnessCell { + fn value(&self, witness: &mut [Vec; COLUMNS], variables: &Variables, index: usize) -> F; + + // Length is 1 by default (T is single F element) unless overridden fn length(&self) -> usize { 1 } @@ -40,25 +43,25 @@ pub trait WitnessCell { /// - layout: the partial layout to initialize from /// - variables: the hashmap of variables to get the values from #[allow(clippy::too_many_arguments)] -pub fn init_cell( - witness: &mut [Vec; W], +pub fn init_cell( + witness: &mut [Vec; COLUMNS], offset: usize, row: usize, col: usize, cell: usize, index: usize, - layout: &[Vec>>], + layout: &[Vec>>], variables: &Variables, ) { witness[col][row + offset] = layout[row][cell].value(witness, variables, index); } /// Initialize a witness row based on layout and computed variables -pub fn init_row( - witness: &mut [Vec; W], +pub fn init_row( + witness: &mut [Vec; COLUMNS], offset: usize, row: usize, - layout: &[Vec>>], + layout: &[Vec>>], variables: &Variables, ) { let mut col = 0; @@ -72,10 +75,10 @@ pub fn init_row( } /// Initialize a witness based on layout and computed variables -pub fn init( - witness: &mut [Vec; W], +pub fn init( + witness: &mut [Vec; COLUMNS], offset: usize, - layout: &[Vec>>], + layout: &[Vec>>], variables: &Variables, ) { for row in 0..layout.len() { @@ -89,7 +92,7 @@ mod tests { use super::*; - use crate::circuits::polynomial::COLUMNS; + use crate::circuits::polynomial::KIMCHI_COLS; use ark_ec::AffineCurve; use ark_ff::{Field, One, Zero}; use mina_curves::pasta::Pallas; @@ -97,7 +100,7 @@ mod tests { #[test] fn zero_layout() { - let layout: Vec>>> = vec![vec![ + let layout: Vec>>> = vec![vec![ ConstantCell::create(PallasField::zero()), ConstantCell::create(PallasField::zero()), ConstantCell::create(PallasField::zero()), @@ -115,7 +118,7 @@ mod tests { ConstantCell::create(PallasField::zero()), ]]; - let mut witness: [Vec; COLUMNS] = + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![PallasField::one(); 1]); for col in 
witness.clone() { @@ -140,7 +143,7 @@ mod tests { #[test] fn mixed_layout() { - let layout: Vec>>> = vec![ + let layout: Vec>>> = vec![ vec![ ConstantCell::create(PallasField::from(12u32)), ConstantCell::create(PallasField::from(0xa5a3u32)), @@ -177,7 +180,7 @@ mod tests { ], ]; - let mut witness: [Vec; COLUMNS] = + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![PallasField::zero(); 2]); // Local variable (witness computation) with same names as VariableCell above @@ -213,7 +216,7 @@ mod tests { assert_eq!(witness[7][1], something_else); assert_eq!(witness[14][1], final_value); - let mut witness2: [Vec; COLUMNS] = + let mut witness2: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![PallasField::zero(); 2]); init( &mut witness2, diff --git a/kimchi/src/circuits/witness/variable_bits_cell.rs b/kimchi/src/circuits/witness/variable_bits_cell.rs index 1fef513607..b380e25e19 100644 --- a/kimchi/src/circuits/witness/variable_bits_cell.rs +++ b/kimchi/src/circuits/witness/variable_bits_cell.rs @@ -18,8 +18,13 @@ impl<'a> VariableBitsCell<'a> { } } -impl<'a, const W: usize, F: Field> WitnessCell for VariableBitsCell<'a> { - fn value(&self, _witness: &mut [Vec; W], variables: &Variables, _index: usize) -> F { +impl<'a, F: Field, const COLUMNS: usize> WitnessCell for VariableBitsCell<'a> { + fn value( + &self, + _witness: &mut [Vec; COLUMNS], + variables: &Variables, + _index: usize, + ) -> F { let bits = if let Some(end) = self.end { F::from_bits(&variables[self.name].to_bits()[self.start..end]) } else { diff --git a/kimchi/src/circuits/witness/variable_cell.rs b/kimchi/src/circuits/witness/variable_cell.rs index c24ce57d42..5f7bb76253 100644 --- a/kimchi/src/circuits/witness/variable_cell.rs +++ b/kimchi/src/circuits/witness/variable_cell.rs @@ -14,8 +14,13 @@ impl<'a> VariableCell<'a> { } } -impl<'a, const W: usize, F: Field> WitnessCell for VariableCell<'a> { - fn value(&self, _witness: &mut [Vec; W], variables: &Variables, _index: usize) -> F { +impl<'a, F: 
Field, const COLUMNS: usize> WitnessCell for VariableCell<'a> { + fn value( + &self, + _witness: &mut [Vec; COLUMNS], + variables: &Variables, + _index: usize, + ) -> F { variables[self.name] } } diff --git a/kimchi/src/curve.rs b/kimchi/src/curve.rs index 981cc0b5f4..57790b10f7 100644 --- a/kimchi/src/curve.rs +++ b/kimchi/src/curve.rs @@ -1,68 +1,112 @@ //! This module contains a useful trait for recursion: [KimchiCurve], //! which defines how a pair of curves interact. -use ark_ec::{short_weierstrass_jacobian::GroupAffine, ModelParameters}; +use ark_ec::{short_weierstrass_jacobian::GroupAffine, AffineCurve, ModelParameters}; use mina_curves::pasta::curves::{ pallas::{LegacyPallasParameters, PallasParameters}, vesta::{LegacyVestaParameters, VestaParameters}, }; use mina_poseidon::poseidon::ArithmeticSpongeParams; use once_cell::sync::Lazy; -use poly_commitment::{commitment::CommitmentCurve, srs::endos}; +use poly_commitment::{ + commitment::{CommitmentCurve, EndoCurve}, + srs::endos, +}; /// Represents additional information that a curve needs in order to be used with Kimchi -pub trait KimchiCurve: CommitmentCurve { +pub trait KimchiCurve: CommitmentCurve + EndoCurve { /// A human readable name. const NAME: &'static str; - /// The other curve that forms the cycle used for recursion. - type OtherCurve: KimchiCurve< - ScalarField = Self::BaseField, - BaseField = Self::ScalarField, - OtherCurve = Self, - >; - /// Provides the sponge params to be used with this curve. - /// If the params for the base field are needed, they can be obtained from [`KimchiCurve::OtherCurve`]. fn sponge_params() -> &'static ArithmeticSpongeParams; - /// Provides the coefficients for the curve endomorphism called (q,r) in some places. + /// Provides the sponge params to be used with the other curve. + fn other_curve_sponge_params() -> &'static ArithmeticSpongeParams; + + /// Provides the coefficients for the curve endomorphism, called (q,r) in some places. 
fn endos() -> &'static (Self::BaseField, Self::ScalarField); + + /// Provides the coefficient for the curve endomorphism over the other field, called q in some + /// places. + fn other_curve_endo() -> &'static Self::ScalarField; + + /// Accessor for the other curve's prime subgroup generator, as coordinates + // TODO: This leaked from snarky.rs. Stop the bleed. + fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField); +} + +fn vesta_endos() -> &'static ( + ::BaseField, + ::ScalarField, +) { + static VESTA_ENDOS: Lazy<( + ::BaseField, + ::ScalarField, + )> = Lazy::new(endos::>); + &VESTA_ENDOS +} + +fn pallas_endos() -> &'static ( + ::BaseField, + ::ScalarField, +) { + static PALLAS_ENDOS: Lazy<( + ::BaseField, + ::ScalarField, + )> = Lazy::new(endos::>); + &PALLAS_ENDOS } impl KimchiCurve for GroupAffine { const NAME: &'static str = "vesta"; - type OtherCurve = GroupAffine; - fn sponge_params() -> &'static ArithmeticSpongeParams { mina_poseidon::pasta::fp_kimchi::static_params() } + fn other_curve_sponge_params() -> &'static ArithmeticSpongeParams { + mina_poseidon::pasta::fq_kimchi::static_params() + } + fn endos() -> &'static (Self::BaseField, Self::ScalarField) { - static VESTA_ENDOS: Lazy<( - ::BaseField, - ::ScalarField, - )> = Lazy::new(endos::>); - &VESTA_ENDOS + vesta_endos() + } + + fn other_curve_endo() -> &'static Self::ScalarField { + &pallas_endos().0 + } + + fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { + GroupAffine::::prime_subgroup_generator() + .to_coordinates() + .unwrap() } } impl KimchiCurve for GroupAffine { const NAME: &'static str = "pallas"; - type OtherCurve = GroupAffine; - fn sponge_params() -> &'static ArithmeticSpongeParams { mina_poseidon::pasta::fq_kimchi::static_params() } + fn other_curve_sponge_params() -> &'static ArithmeticSpongeParams { + mina_poseidon::pasta::fp_kimchi::static_params() + } + fn endos() -> &'static (Self::BaseField, Self::ScalarField) { - 
static PALLAS_ENDOS: Lazy<( - ::BaseField, - ::ScalarField, - )> = Lazy::new(endos::>); - &PALLAS_ENDOS + pallas_endos() + } + + fn other_curve_endo() -> &'static Self::ScalarField { + &vesta_endos().0 + } + + fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { + GroupAffine::::prime_subgroup_generator() + .to_coordinates() + .unwrap() } } @@ -73,27 +117,88 @@ impl KimchiCurve for GroupAffine { impl KimchiCurve for GroupAffine { const NAME: &'static str = "legacy_vesta"; - type OtherCurve = GroupAffine; - fn sponge_params() -> &'static ArithmeticSpongeParams { mina_poseidon::pasta::fp_legacy::static_params() } + fn other_curve_sponge_params() -> &'static ArithmeticSpongeParams { + mina_poseidon::pasta::fq_legacy::static_params() + } + fn endos() -> &'static (Self::BaseField, Self::ScalarField) { - GroupAffine::::endos() + vesta_endos() + } + + fn other_curve_endo() -> &'static Self::ScalarField { + &pallas_endos().0 + } + + fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { + GroupAffine::::prime_subgroup_generator() + .to_coordinates() + .unwrap() } } impl KimchiCurve for GroupAffine { const NAME: &'static str = "legacy_pallas"; - type OtherCurve = GroupAffine; - fn sponge_params() -> &'static ArithmeticSpongeParams { mina_poseidon::pasta::fq_legacy::static_params() } + fn other_curve_sponge_params() -> &'static ArithmeticSpongeParams { + mina_poseidon::pasta::fp_legacy::static_params() + } + + fn endos() -> &'static (Self::BaseField, Self::ScalarField) { + pallas_endos() + } + + fn other_curve_endo() -> &'static Self::ScalarField { + &vesta_endos().0 + } + + fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { + GroupAffine::::prime_subgroup_generator() + .to_coordinates() + .unwrap() + } +} + +#[cfg(feature = "bn254")] +use mina_poseidon::dummy_values::kimchi_dummy; + +#[cfg(feature = "bn254")] +impl KimchiCurve for GroupAffine { + const NAME: &'static str = 
"bn254"; + + fn sponge_params() -> &'static ArithmeticSpongeParams { + // TODO: Generate some params + static PARAMS: Lazy> = Lazy::new(kimchi_dummy); + &PARAMS + } + + fn other_curve_sponge_params() -> &'static ArithmeticSpongeParams { + // TODO: Generate some params + static PARAMS: Lazy> = Lazy::new(kimchi_dummy); + &PARAMS + } + fn endos() -> &'static (Self::BaseField, Self::ScalarField) { - GroupAffine::::endos() + static ENDOS: Lazy<(ark_bn254::Fq, ark_bn254::Fr)> = + Lazy::new(endos::); + &ENDOS + } + + fn other_curve_endo() -> &'static Self::ScalarField { + // TODO: Dummy value, this is definitely not right + static ENDO: Lazy = Lazy::new(|| 13u64.into()); + &ENDO + } + + fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { + // TODO: Dummy value, this is definitely not right + (44u64.into(), 88u64.into()) } } diff --git a/kimchi/src/error.rs b/kimchi/src/error.rs index 37e2f29638..3c0b352eb7 100644 --- a/kimchi/src/error.rs +++ b/kimchi/src/error.rs @@ -10,6 +10,11 @@ pub enum ProverError { #[error("the circuit is too large")] NoRoomForZkInWitness, + #[error( + "there are not enough random rows to achieve zero-knowledge (expected: {0}, got: {1})" + )] + NotZeroKnowledge(usize, usize), + #[error("the witness columns are not all the same size")] WitnessCsInconsistent, @@ -22,9 +27,6 @@ pub enum ProverError { #[error("the lookup failed to find a match in the table: row={0}")] ValueNotInTable(usize), - #[error("SRS size is smaller than the domain size required by the circuit")] - SRSTooSmall, - #[error("the runtime tables provided did not match the index's configuration")] RuntimeTablesInconsistent, @@ -35,8 +37,8 @@ pub enum ProverError { /// Errors that can arise when verifying a proof #[derive(Error, Debug, Clone, Copy)] pub enum VerifyError { - #[error("the commitment to {0} is of an unexpected size")] - IncorrectCommitmentLength(&'static str), + #[error("the commitment to {0} is of an unexpected size (expected {1}, got 
{2})")] + IncorrectCommitmentLength(&'static str, usize, usize), #[error("the public input is of an unexpected size (expected {0})")] IncorrectPubicInputLength(usize), @@ -44,8 +46,10 @@ pub enum VerifyError { #[error("the previous challenges have an unexpected length (expected {0}, got {1})")] IncorrectPrevChallengesLength(usize, usize), - #[error("proof malformed: an evaluation was of the incorrect size (all evaluations are expected to be of length 1)")] - IncorrectEvaluationsLength, + #[error( + "proof malformed: an evaluation for {2} was of the incorrect size (expected {0}, got {1})" + )] + IncorrectEvaluationsLength(usize, usize, &'static str), #[error("the opening proof failed to verify")] OpenProof, @@ -69,10 +73,23 @@ pub enum VerifyError { IncorrectRuntimeProof, #[error("the evaluation for {0:?} is missing")] - MissingEvaluation(crate::circuits::expr::Column), + MissingEvaluation(crate::circuits::berkeley_columns::Column), + + #[error("the evaluation for PublicInput is missing")] + MissingPublicInputEvaluation, #[error("the commitment for {0:?} is missing")] - MissingCommitment(crate::circuits::expr::Column), + MissingCommitment(crate::circuits::berkeley_columns::Column), +} + +/// Errors that can arise when preparing the setup +#[derive(Error, Debug, Clone)] +pub enum DomainCreationError { + #[error("could not compute the size of domain for {0}")] + DomainSizeFailed(usize), + + #[error("construction of domain {0} for size {1} failed")] + DomainConstructionFailed(String, usize), } /// Errors that can arise when preparing the setup @@ -82,7 +99,7 @@ pub enum SetupError { ConstraintSystem(String), #[error("the domain could not be constructed: {0}")] - DomainCreation(&'static str), + DomainCreation(DomainCreationError), } /// Errors that can arise when creating a verifier index diff --git a/kimchi/src/lagrange_basis_evaluations.rs b/kimchi/src/lagrange_basis_evaluations.rs index 157fb5af2a..ee825a0ae1 100644 --- a/kimchi/src/lagrange_basis_evaluations.rs +++ 
b/kimchi/src/lagrange_basis_evaluations.rs @@ -5,39 +5,49 @@ use rayon::prelude::*; /// The evaluations of all normalized lagrange basis polynomials at a given /// point. Can be used to evaluate an `Evaluations` form polynomial at that point. pub struct LagrangeBasisEvaluations { - pub evals: Vec, + evals: Vec>, } impl LagrangeBasisEvaluations { /// Given the evaluations form of a polynomial, directly evaluate that polynomial at a point. - pub fn evaluate>(&self, p: &Evaluations) -> F { - assert_eq!(p.evals.len() % self.evals.len(), 0); - let stride = p.evals.len() / self.evals.len(); + pub fn evaluate>(&self, p: &Evaluations) -> Vec { + assert_eq!(p.evals.len() % self.evals[0].len(), 0); + let stride = p.evals.len() / self.evals[0].len(); let p_evals = &p.evals; (&self.evals) .into_par_iter() - .enumerate() - .map(|(i, e)| p_evals[stride * i] * e) - .sum() + .map(|evals| { + evals + .into_par_iter() + .enumerate() + .map(|(i, e)| p_evals[stride * i] * e) + .sum() + }) + .collect() } /// Given the evaluations form of a polynomial, directly evaluate that polynomial at a point, /// assuming that the given evaluations are either 0 or 1 at every point of the domain. - pub fn evaluate_boolean>(&self, p: &Evaluations) -> F { - assert_eq!(p.evals.len() % self.evals.len(), 0); - let stride = p.evals.len() / self.evals.len(); - let mut result = F::zero(); - for (i, e) in self.evals.iter().enumerate() { - if !p.evals[stride * i].is_zero() { - result += e; - } - } - result + pub fn evaluate_boolean>(&self, p: &Evaluations) -> Vec { + assert_eq!(p.evals.len() % self.evals[0].len(), 0); + let stride = p.evals.len() / self.evals[0].len(); + self.evals + .iter() + .map(|evals| { + let mut result = F::zero(); + for (i, e) in evals.iter().enumerate() { + if !p.evals[stride * i].is_zero() { + result += e; + } + } + result + }) + .collect() } /// Compute all evaluations of the normalized lagrange basis polynomials of the /// given domain at the given point. 
Runs in time O(domain size). - pub fn new(domain: D, x: F) -> LagrangeBasisEvaluations { + fn new_with_segment_size_1(domain: D, x: F) -> LagrangeBasisEvaluations { let n = domain.size(); // We want to compute for all i // s_i = 1 / t_i @@ -98,7 +108,40 @@ impl LagrangeBasisEvaluations { // Denominators now contains the desired result. LagrangeBasisEvaluations { - evals: denominators, + evals: vec![denominators], + } + } + + /// Compute all evaluations of the normalized lagrange basis polynomials of the + /// given domain at the given point. Runs in time O(n log(n)) for n = domain size. + fn new_with_chunked_segments( + max_poly_size: usize, + domain: D, + x: F, + ) -> LagrangeBasisEvaluations { + let n = domain.size(); + let num_chunks = n / max_poly_size; + let mut evals = Vec::with_capacity(num_chunks); + for i in 0..num_chunks { + let mut x_pow = F::one(); + let mut chunked_evals = vec![F::zero(); n]; + for j in 0..max_poly_size { + chunked_evals[i * max_poly_size + j] = x_pow; + x_pow *= x; + } + // This uses the same trick as `poly_commitment::srs::SRS::add_lagrange_basis`, but + // applied to field elements instead of group elements. 
+ domain.ifft_in_place(&mut chunked_evals); + evals.push(chunked_evals); + } + LagrangeBasisEvaluations { evals } + } + + pub fn new(max_poly_size: usize, domain: D, x: F) -> LagrangeBasisEvaluations { + if domain.size() <= max_poly_size { + Self::new_with_segment_size_1(domain, x) + } else { + Self::new_with_chunked_segments(max_poly_size, domain, x) } } } @@ -118,19 +161,44 @@ mod tests { let domain = Radix2EvaluationDomain::new(n).unwrap(); let rng = &mut StdRng::from_seed([0u8; 32]); let x = Fp::rand(rng); - let evaluator = LagrangeBasisEvaluations::new(domain, x); + let evaluator = LagrangeBasisEvaluations::new(domain.size(), domain, x); let expected = (0..n).map(|i| { let mut lagrange_i = vec![Fp::zero(); n]; lagrange_i[i] = Fp::one(); - Evaluations::from_vec_and_domain(lagrange_i, domain) + vec![Evaluations::from_vec_and_domain(lagrange_i, domain) .interpolate() - .evaluate(&x) + .evaluate(&x)] }); - for (i, expected) in expected.enumerate() { - if evaluator.evals[i] != expected { - panic!("{}, {}: {} != {}", line!(), i, evaluator.evals[i], expected); + for (i, (expected, got)) in expected.zip(evaluator.evals).enumerate() { + for (j, (expected, got)) in expected.iter().zip(got.iter()).enumerate() { + if got != expected { + panic!("{}, {}, {}: {} != {}", line!(), i, j, got, expected); + } + } + } + } + + #[test] + fn test_new_with_chunked_segments() { + let n = 1 << 4; + let domain = Radix2EvaluationDomain::new(n).unwrap(); + let rng = &mut StdRng::from_seed([0u8; 32]); + let x = Fp::rand(rng); + let evaluator = LagrangeBasisEvaluations::new(domain.size(), domain, x); + let evaluator_chunked = + LagrangeBasisEvaluations::new_with_chunked_segments(domain.size(), domain, x); + for (i, (evals, evals_chunked)) in evaluator + .evals + .iter() + .zip(evaluator_chunked.evals.iter()) + .enumerate() + { + for (j, (evals, evals_chunked)) in evals.iter().zip(evals_chunked.iter()).enumerate() { + if evals != evals_chunked { + panic!("{}, {}, {}: {} != {}", line!(), i, j, 
evals, evals_chunked); + } } } } @@ -151,10 +219,10 @@ mod tests { let x = Fp::rand(rng); - let evaluator = LagrangeBasisEvaluations::new(domain, x); + let evaluator = LagrangeBasisEvaluations::new(domain.size(), domain, x); let y = evaluator.evaluate(&evals); - let expected = evals.interpolate().evaluate(&x); + let expected = vec![evals.interpolate().evaluate(&x)]; assert_eq!(y, expected) } @@ -179,10 +247,10 @@ mod tests { let x = Fp::rand(rng); - let evaluator = LagrangeBasisEvaluations::new(domain, x); + let evaluator = LagrangeBasisEvaluations::new(domain.size(), domain, x); let y = evaluator.evaluate_boolean(&evals); - let expected = evals.interpolate().evaluate(&x); + let expected = vec![evals.interpolate().evaluate(&x)]; assert_eq!(y, expected) } } diff --git a/kimchi/src/linearization.rs b/kimchi/src/linearization.rs index 925e520e3d..0f556e87ee 100644 --- a/kimchi/src/linearization.rs +++ b/kimchi/src/linearization.rs @@ -8,7 +8,7 @@ use crate::circuits::expr; use crate::circuits::lookup; use crate::circuits::lookup::{ constraints::LookupConfiguration, - lookups::{LookupFeatures, LookupInfo, LookupPatterns}, + lookups::{LookupFeatures, LookupInfo, LookupPattern, LookupPatterns}, }; use crate::circuits::polynomials::keccak; use crate::circuits::polynomials::keccak::circuitgates::{KeccakRound, KeccakSponge}; @@ -27,8 +27,9 @@ use crate::circuits::polynomials::{ }; use crate::circuits::{ + berkeley_columns::Column, constraints::FeatureFlags, - expr::{Column, ConstantExpr, Expr, FeatureFlag, Linearization, PolishToken}, + expr::{ConstantExpr, Expr, FeatureFlag, Linearization, PolishToken}, gate::GateType, }; use ark_ff::{FftField, PrimeField, SquareRootField, Zero}; @@ -38,10 +39,10 @@ use ark_ff::{FftField, PrimeField, SquareRootField, Zero}; /// # Panics /// /// Will panic if `generic_gate` is not associate with `alpha^0`. 
-pub fn constraints_expr( +pub fn constraints_expr( feature_flags: Option<&FeatureFlags>, generic: bool, -) -> (Expr>, Alphas) { +) -> (Expr, Column>, Alphas) { // register powers of alpha so that we don't reuse them across mutually inclusive constraints let mut powers_of_alpha = Alphas::::default(); @@ -267,7 +268,7 @@ pub fn constraints_expr( // flags. if cfg!(feature = "check_feature_flags") { if let Some(feature_flags) = feature_flags { - let (feature_flagged_expr, _) = constraints_expr::(None, generic); + let (feature_flagged_expr, _) = constraints_expr::(None, generic); let feature_flagged_expr = feature_flagged_expr.apply_feature_flags(feature_flags); assert_eq!(expr, feature_flagged_expr); } @@ -279,7 +280,7 @@ pub fn constraints_expr( /// Adds the polynomials that are evaluated as part of the proof /// for the linearization to work. -pub fn linearization_columns( +pub fn linearization_columns( feature_flags: Option<&FeatureFlags>, ) -> std::collections::HashSet { let mut h = std::collections::HashSet::new(); @@ -315,12 +316,12 @@ pub fn linearization_columns( }; // the witness polynomials - for i in 0..W { + for i in 0..COLUMNS { h.insert(Witness(i)); } // the coefficient polynomials - for i in 0..W { + for i in 0..COLUMNS { h.insert(Coefficient(i)); } @@ -353,6 +354,30 @@ pub fn linearization_columns( // the generic selector polynomial h.insert(Index(GateType::Generic)); + h.insert(Index(GateType::CompleteAdd)); + h.insert(Index(GateType::VarBaseMul)); + h.insert(Index(GateType::EndoMul)); + h.insert(Index(GateType::EndoMulScalar)); + + // optional columns + h.insert(Index(GateType::RangeCheck0)); + h.insert(Index(GateType::RangeCheck1)); + h.insert(Index(GateType::ForeignFieldAdd)); + h.insert(Index(GateType::ForeignFieldMul)); + h.insert(Index(GateType::Xor16)); + h.insert(Index(GateType::Rot64)); + h.insert(Index(GateType::KeccakRound)); + h.insert(Index(GateType::KeccakSponge)); + + // lookup selectors + h.insert(LookupRuntimeSelector); + 
h.insert(LookupKindIndex(LookupPattern::Xor)); + h.insert(LookupKindIndex(LookupPattern::Lookup)); + h.insert(LookupKindIndex(LookupPattern::RangeCheck)); + h.insert(LookupKindIndex(LookupPattern::ForeignFieldMul)); + //h.insert(LookupKindIndex(LookupPattern::KeccakRound)); + //h.insert(LookupKindIndex(LookupPattern::KeccakSponge)); + h } @@ -364,18 +389,24 @@ pub fn linearization_columns( /// # Panics /// /// Will panic if the `linearization` process fails. -pub fn expr_linearization( +#[allow(clippy::type_complexity)] +pub fn expr_linearization( feature_flags: Option<&FeatureFlags>, generic: bool, -) -> (Linearization>>, Alphas) { - let evaluated_cols = linearization_columns::(feature_flags); +) -> ( + Linearization>, Column>, + Alphas, +) { + let evaluated_cols = linearization_columns::(feature_flags); - let (expr, powers_of_alpha) = constraints_expr::(feature_flags, generic); + let (expr, powers_of_alpha) = constraints_expr::(feature_flags, generic); let linearization = expr .linearize(evaluated_cols) .unwrap() .map(|e| e.to_polish()); + assert_eq!(linearization.index_terms.len(), 0); + (linearization, powers_of_alpha) } diff --git a/kimchi/src/oracles.rs b/kimchi/src/oracles.rs index b621f152db..52a2f5e081 100644 --- a/kimchi/src/oracles.rs +++ b/kimchi/src/oracles.rs @@ -37,9 +37,9 @@ where #[cfg(feature = "ocaml_types")] pub mod caml { - use crate::circuits::wires::COLUMNS; + use crate::circuits::wires::KIMCHI_COLS; use ark_ff::PrimeField; - use poly_commitment::commitment::shift_scalar; + use poly_commitment::{commitment::shift_scalar, evaluation_proof::OpeningProof}; use crate::{ circuits::scalars::caml::CamlRandomOracles, curve::KimchiCurve, error::VerifyError, @@ -58,15 +58,15 @@ pub mod caml { pub fn create_caml_oracles( lgr_comm: Vec>, - index: VerifierIndex, - proof: ProverProof, + index: VerifierIndex, KIMCHI_COLS>, + proof: ProverProof, KIMCHI_COLS>, public_input: &[G::ScalarField], ) -> Result, VerifyError> where G: KimchiCurve, G::BaseField: 
PrimeField, EFqSponge: Clone + FqSponge, - EFrSponge: FrSponge, + EFrSponge: FrSponge, CamlF: From, { let lgr_comm: Vec> = lgr_comm.into_iter().take(public_input.len()).collect(); @@ -77,7 +77,7 @@ pub mod caml { let p_comm = PolyComm::::multi_scalar_mul(&lgr_comm_refs, &negated_public); let oracles_result = - proof.oracles::(&index, &p_comm, public_input)?; + proof.oracles::(&index, &p_comm, Some(public_input))?; let (mut sponge, combined_inner_product, public_evals, digest, oracles) = ( oracles_result.fq_sponge, diff --git a/kimchi/src/plonk_sponge.rs b/kimchi/src/plonk_sponge.rs index e5b496f50c..0df42f38cb 100644 --- a/kimchi/src/plonk_sponge.rs +++ b/kimchi/src/plonk_sponge.rs @@ -5,9 +5,10 @@ use mina_poseidon::{ poseidon::{ArithmeticSponge, ArithmeticSpongeParams, Sponge}, }; -use crate::proof::{LookupEvaluations, PointEvaluations, ProofEvaluations}; +use crate::circuits::wires::KIMCHI_COLS; +use crate::proof::{PointEvaluations, ProofEvaluations}; -pub trait FrSponge { +pub trait FrSponge { /// Creates a new Fr-Sponge. fn new(p: &'static ArithmeticSpongeParams) -> Self; @@ -25,10 +26,10 @@ pub trait FrSponge { /// Absorbs the given evaluations into the sponge. 
// TODO: IMO this function should be inlined in prover/verifier - fn absorb_evaluations(&mut self, e: &ProofEvaluations>>); + fn absorb_evaluations(&mut self, e: &ProofEvaluations>, COLUMNS>); } -impl FrSponge for DefaultFrSponge { +impl FrSponge for DefaultFrSponge { fn new(params: &'static ArithmeticSpongeParams) -> DefaultFrSponge { DefaultFrSponge { sponge: ArithmeticSponge::new(params), @@ -56,36 +57,119 @@ impl FrSponge for DefaultFrSponge } // We absorb all evaluations of the same polynomial at the same time - fn absorb_evaluations(&mut self, e: &ProofEvaluations>>) { + fn absorb_evaluations(&mut self, e: &ProofEvaluations>, COLUMNS>) { self.last_squeezed = vec![]; let ProofEvaluations { + public: _, // Must be absorbed first manually for now, to handle Mina annoyances w, z, s, coefficients, - lookup, generic_selector, poseidon_selector, + complete_add_selector, + mul_selector, + emul_selector, + endomul_scalar_selector, + range_check0_selector, + range_check1_selector, + foreign_field_add_selector, + foreign_field_mul_selector, + xor_selector, + rot_selector, + keccak_round_selector, + keccak_sponge_selector, + lookup_aggregation, + lookup_table, + lookup_sorted, + runtime_lookup_table, + runtime_lookup_table_selector, + xor_lookup_selector, + lookup_gate_lookup_selector, + range_check_lookup_selector, + foreign_field_mul_lookup_selector, + //keccak_round_lookup_selector, + //keccak_sponge_lookup_selector, } = e; - let mut points = vec![z, generic_selector, poseidon_selector]; + let mut points = vec![ + z, + generic_selector, + poseidon_selector, + complete_add_selector, + mul_selector, + emul_selector, + endomul_scalar_selector, + ]; w.iter().for_each(|w_i| points.push(w_i)); coefficients.iter().for_each(|c_i| points.push(c_i)); s.iter().for_each(|s_i| points.push(s_i)); - if let Some(l) = lookup.as_ref() { - let LookupEvaluations { - sorted, - aggreg, - table, - runtime, - } = l; - points.push(aggreg); - points.push(table); - sorted.iter().for_each(|s| 
points.push(s)); - runtime.iter().for_each(|x| points.push(x)); + // Optional gates + + if let Some(range_check0_selector) = range_check0_selector.as_ref() { + points.push(range_check0_selector) + } + if let Some(range_check1_selector) = range_check1_selector.as_ref() { + points.push(range_check1_selector) + } + if let Some(foreign_field_add_selector) = foreign_field_add_selector.as_ref() { + points.push(foreign_field_add_selector) + } + if let Some(foreign_field_mul_selector) = foreign_field_mul_selector.as_ref() { + points.push(foreign_field_mul_selector) + } + if let Some(xor_selector) = xor_selector.as_ref() { + points.push(xor_selector) + } + if let Some(rot_selector) = rot_selector.as_ref() { + points.push(rot_selector) + } + if let Some(keccak_round_selector) = keccak_round_selector.as_ref() { + points.push(keccak_round_selector) + } + if let Some(keccak_sponge_selector) = keccak_sponge_selector.as_ref() { + points.push(keccak_sponge_selector) + } + if let Some(lookup_aggregation) = lookup_aggregation.as_ref() { + points.push(lookup_aggregation) + } + if let Some(lookup_table) = lookup_table.as_ref() { + points.push(lookup_table) + } + for lookup_sorted in lookup_sorted { + if let Some(lookup_sorted) = lookup_sorted.as_ref() { + points.push(lookup_sorted) + } + } + if let Some(runtime_lookup_table) = runtime_lookup_table.as_ref() { + points.push(runtime_lookup_table) + } + if let Some(runtime_lookup_table_selector) = runtime_lookup_table_selector.as_ref() { + points.push(runtime_lookup_table_selector) + } + if let Some(xor_lookup_selector) = xor_lookup_selector.as_ref() { + points.push(xor_lookup_selector) + } + if let Some(lookup_gate_lookup_selector) = lookup_gate_lookup_selector.as_ref() { + points.push(lookup_gate_lookup_selector) + } + if let Some(range_check_lookup_selector) = range_check_lookup_selector.as_ref() { + points.push(range_check_lookup_selector) + } + if let Some(foreign_field_mul_lookup_selector) = 
foreign_field_mul_lookup_selector.as_ref() + { + points.push(foreign_field_mul_lookup_selector) + } + /* + if let Some(keccak_round_lookup_selector) = keccak_round_lookup_selector.as_ref() { + points.push(keccak_round_lookup_selector) + } + if let Some(keccak_sponge_lookup_selector) = keccak_sponge_lookup_selector.as_ref() { + points.push(keccak_sponge_lookup_selector) } + */ points.into_iter().for_each(|p| { self.sponge.absorb(&p.zeta); diff --git a/kimchi/src/proof.rs b/kimchi/src/proof.rs index eda023418f..be4c6f019a 100644 --- a/kimchi/src/proof.rs +++ b/kimchi/src/proof.rs @@ -1,14 +1,16 @@ //! This module implements the data structures of a proof. -use crate::circuits::{expr::Column, gate::GateType, wires::PERMUTS}; +use crate::circuits::{ + berkeley_columns::Column, + gate::GateType, + lookup::lookups::LookupPattern, + wires::{KIMCHI_COLS, PERMUTS}, +}; use ark_ec::AffineCurve; use ark_ff::{FftField, One, Zero}; use ark_poly::univariate::DensePolynomial; use o1_utils::ExtendedDensePolynomial; -use poly_commitment::{ - commitment::{b_poly, b_poly_coefficients, PolyComm}, - evaluation_proof::OpeningProof, -}; +use poly_commitment::commitment::{b_poly, b_poly_coefficients, PolyComm}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; use std::array; @@ -34,44 +36,84 @@ pub struct PointEvaluations { pub zeta_omega: Evals, } -/// Evaluations of lookup polynomials -#[serde_as] -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct LookupEvaluations { - /// sorted lookup table polynomial - pub sorted: Vec, - /// lookup aggregation polynomial - pub aggreg: Evals, - // TODO: May be possible to optimize this away? - /// lookup table polynomial - pub table: Evals, - - /// Optionally, a runtime table polynomial. - pub runtime: Option, -} - // TODO: this should really be vectors here, perhaps create another type for chunked evaluations? /// Polynomial evaluations contained in a `ProverProof`. 
/// - **Chunked evaluations** `Field` is instantiated with vectors with a length that equals the length of the chunk /// - **Non chunked evaluations** `Field` is instantiated with a field, so they are single-sized#[serde_as] #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ProofEvaluations { +pub struct ProofEvaluations { + /// public input polynomials + pub public: Option, /// witness polynomials - pub w: Vec, + #[serde_as(as = "[_; COLUMNS]")] + pub w: [Evals; COLUMNS], /// permutation polynomial pub z: Evals, /// permutation polynomials /// (PERMUTS-1 evaluations because the last permutation is only used in commitment form) pub s: [Evals; PERMUTS - 1], /// coefficient polynomials - pub coefficients: Vec, - /// lookup-related evaluations - pub lookup: Option>, + #[serde_as(as = "[_; COLUMNS]")] + pub coefficients: [Evals; COLUMNS], /// evaluation of the generic selector polynomial pub generic_selector: Evals, /// evaluation of the poseidon selector polynomial pub poseidon_selector: Evals, + /// evaluation of the elliptic curve addition selector polynomial + pub complete_add_selector: Evals, + /// evaluation of the elliptic curve variable base scalar multiplication selector polynomial + pub mul_selector: Evals, + /// evaluation of the endoscalar multiplication selector polynomial + pub emul_selector: Evals, + /// evaluation of the endoscalar multiplication scalar computation selector polynomial + pub endomul_scalar_selector: Evals, + + // Optional gates + /// evaluation of the RangeCheck0 selector polynomial + pub range_check0_selector: Option, + /// evaluation of the RangeCheck1 selector polynomial + pub range_check1_selector: Option, + /// evaluation of the ForeignFieldAdd selector polynomial + pub foreign_field_add_selector: Option, + /// evaluation of the ForeignFieldMul selector polynomial + pub foreign_field_mul_selector: Option, + /// evaluation of the Xor selector polynomial + pub xor_selector: Option, + /// evaluation of the Rot 
selector polynomial + pub rot_selector: Option, + /// evaluation of the KeccakRound selector polynomial + pub keccak_round_selector: Option, + /// evaluation of the KeccakRound selector polynomial + pub keccak_sponge_selector: Option, + + // lookup-related evaluations + /// evaluation of lookup aggregation polynomial + pub lookup_aggregation: Option, + /// evaluation of lookup table polynomial + pub lookup_table: Option, + /// evaluation of lookup sorted polynomials + pub lookup_sorted: [Option; 5], + /// evaluation of runtime lookup table polynomial + pub runtime_lookup_table: Option, + + // lookup selectors + /// evaluation of the runtime lookup table selector polynomial + pub runtime_lookup_table_selector: Option, + /// evaluation of the Xor range check pattern selector polynomial + pub xor_lookup_selector: Option, + /// evaluation of the Lookup range check pattern selector polynomial + pub lookup_gate_lookup_selector: Option, + /// evaluation of the RangeCheck range check pattern selector polynomial + pub range_check_lookup_selector: Option, + /// evaluation of the ForeignFieldMul range check pattern selector polynomial + pub foreign_field_mul_lookup_selector: Option, + /* + /// evaluation of the KeccakRound pattern selector polynomial + pub keccak_round_lookup_selector: Option, + /// evaluation of the KeccakSponge pattern selector polynomial + pub keccak_sponge_lookup_selector: Option, + */ } /// Commitments linked to the lookup feature @@ -91,7 +133,7 @@ pub struct LookupCommitments { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct ProverCommitments { +pub struct ProverCommitments { /// The commitments to the witness (execution trace) pub w_comm: Vec>, /// The commitment to the permutation polynomial @@ -106,15 +148,19 @@ pub struct ProverCommitments { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: 
ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct ProverProof { +pub struct ProverProof { /// All the polynomial commitments required in the proof - pub commitments: ProverCommitments, + pub commitments: ProverCommitments, /// batched commitment opening proof - pub proof: OpeningProof, + #[serde(bound( + serialize = "OpeningProof: Serialize", + deserialize = "OpeningProof: Deserialize<'de>" + ))] + pub proof: OpeningProof, /// Two evaluations over a number of committed polynomials - pub evals: ProofEvaluations>>, + pub evals: ProofEvaluations>, COLUMNS>, /// Required evaluation for [Maller's optimization](https://o1-labs.github.io/mina-book/crypto/plonk/maller_15.html#the-evaluation-of-l) #[serde_as(as = "o1_utils::serialization::SerdeAs")] @@ -159,78 +205,141 @@ impl PointEvaluations { } } -impl LookupEvaluations { - pub fn map Eval2>(self, f: &FN) -> LookupEvaluations { - let LookupEvaluations { - sorted, - aggreg, - table, - runtime, - } = self; - LookupEvaluations { - sorted: sorted.into_iter().map(f).collect(), - aggreg: f(aggreg), - table: f(table), - runtime: runtime.map(f), - } - } - - pub fn map_ref Eval2>(&self, f: &FN) -> LookupEvaluations { - let LookupEvaluations { - sorted, - aggreg, - table, - runtime, - } = self; - LookupEvaluations { - sorted: sorted.iter().map(f).collect(), - aggreg: f(aggreg), - table: f(table), - runtime: runtime.as_ref().map(f), - } - } -} - -impl ProofEvaluations { - pub fn map Eval2>(self, f: &FN) -> ProofEvaluations { +impl ProofEvaluations { + pub fn map Eval2>(self, f: &FN) -> ProofEvaluations { let ProofEvaluations { + public, w, z, s, coefficients, - lookup, generic_selector, poseidon_selector, + complete_add_selector, + mul_selector, + emul_selector, + endomul_scalar_selector, + range_check0_selector, + range_check1_selector, + foreign_field_add_selector, + foreign_field_mul_selector, + xor_selector, + rot_selector, + keccak_round_selector, + keccak_sponge_selector, + 
lookup_aggregation, + lookup_table, + lookup_sorted, + runtime_lookup_table, + runtime_lookup_table_selector, + xor_lookup_selector, + lookup_gate_lookup_selector, + range_check_lookup_selector, + foreign_field_mul_lookup_selector, + //keccak_round_lookup_selector, + //keccak_sponge_lookup_selector, } = self; ProofEvaluations { - w: w.into_iter().map(f).collect(), + public: public.map(f), + w: w.map(f), z: f(z), s: s.map(f), - coefficients: coefficients.into_iter().map(f).collect(), - lookup: lookup.map(|x| LookupEvaluations::map(x, f)), + coefficients: coefficients.map(f), generic_selector: f(generic_selector), poseidon_selector: f(poseidon_selector), + complete_add_selector: f(complete_add_selector), + mul_selector: f(mul_selector), + emul_selector: f(emul_selector), + endomul_scalar_selector: f(endomul_scalar_selector), + range_check0_selector: range_check0_selector.map(f), + range_check1_selector: range_check1_selector.map(f), + foreign_field_add_selector: foreign_field_add_selector.map(f), + foreign_field_mul_selector: foreign_field_mul_selector.map(f), + xor_selector: xor_selector.map(f), + rot_selector: rot_selector.map(f), + keccak_round_selector: keccak_round_selector.map(f), + keccak_sponge_selector: keccak_sponge_selector.map(f), + lookup_aggregation: lookup_aggregation.map(f), + lookup_table: lookup_table.map(f), + lookup_sorted: lookup_sorted.map(|x| x.map(f)), + runtime_lookup_table: runtime_lookup_table.map(f), + runtime_lookup_table_selector: runtime_lookup_table_selector.map(f), + xor_lookup_selector: xor_lookup_selector.map(f), + lookup_gate_lookup_selector: lookup_gate_lookup_selector.map(f), + range_check_lookup_selector: range_check_lookup_selector.map(f), + foreign_field_mul_lookup_selector: foreign_field_mul_lookup_selector.map(f), + //keccak_round_lookup_selector: keccak_round_lookup_selector.map(f), + //keccak_sponge_lookup_selector: keccak_sponge_lookup_selector.map(f), } } - pub fn map_ref Eval2>(&self, f: &FN) -> ProofEvaluations { + pub 
fn map_ref Eval2>( + &self, + f: &FN, + ) -> ProofEvaluations { let ProofEvaluations { + public, w, z, s: [s0, s1, s2, s3, s4, s5], coefficients, - lookup, generic_selector, poseidon_selector, + complete_add_selector, + mul_selector, + emul_selector, + endomul_scalar_selector, + range_check0_selector, + range_check1_selector, + foreign_field_add_selector, + foreign_field_mul_selector, + xor_selector, + rot_selector, + keccak_round_selector, + keccak_sponge_selector, + lookup_aggregation, + lookup_table, + lookup_sorted, + runtime_lookup_table, + runtime_lookup_table_selector, + xor_lookup_selector, + lookup_gate_lookup_selector, + range_check_lookup_selector, + foreign_field_mul_lookup_selector, + //keccak_round_lookup_selector, + //keccak_sponge_lookup_selector, } = self; ProofEvaluations { - w: w.iter().map(f).collect(), + public: public.as_ref().map(f), + w: array::from_fn(|i: usize| f(&w[i])), z: f(z), s: [f(s0), f(s1), f(s2), f(s3), f(s4), f(s5)], - coefficients: coefficients.iter().map(f).collect(), - lookup: lookup.as_ref().map(|l| l.map_ref(f)), + coefficients: array::from_fn(|i: usize| f(&coefficients[i])), generic_selector: f(generic_selector), poseidon_selector: f(poseidon_selector), + complete_add_selector: f(complete_add_selector), + mul_selector: f(mul_selector), + emul_selector: f(emul_selector), + endomul_scalar_selector: f(endomul_scalar_selector), + range_check0_selector: range_check0_selector.as_ref().map(f), + range_check1_selector: range_check1_selector.as_ref().map(f), + foreign_field_add_selector: foreign_field_add_selector.as_ref().map(f), + foreign_field_mul_selector: foreign_field_mul_selector.as_ref().map(f), + xor_selector: xor_selector.as_ref().map(f), + rot_selector: rot_selector.as_ref().map(f), + keccak_round_selector: keccak_round_selector.as_ref().map(f), + keccak_sponge_selector: keccak_sponge_selector.as_ref().map(f), + lookup_aggregation: lookup_aggregation.as_ref().map(f), + lookup_table: lookup_table.as_ref().map(f), + 
lookup_sorted: array::from_fn(|i| lookup_sorted[i].as_ref().map(f)), + runtime_lookup_table: runtime_lookup_table.as_ref().map(f), + runtime_lookup_table_selector: runtime_lookup_table_selector.as_ref().map(f), + xor_lookup_selector: xor_lookup_selector.as_ref().map(f), + lookup_gate_lookup_selector: lookup_gate_lookup_selector.as_ref().map(f), + range_check_lookup_selector: range_check_lookup_selector.as_ref().map(f), + foreign_field_mul_lookup_selector: foreign_field_mul_lookup_selector.as_ref().map(f), + //keccak_round_lookup_selector: keccak_round_lookup_selector.as_ref().map(f), + //keccak_sponge_lookup_selector: keccak_sponge_lookup_selector.as_ref().map(f), } } } @@ -282,29 +391,55 @@ impl RecursionChallenge { } } -impl ProofEvaluations> { +impl ProofEvaluations, COLUMNS> { pub fn dummy_with_witness_evaluations( - curr: [F; W], - next: [F; W], - ) -> ProofEvaluations> { + curr: [F; COLUMNS], + next: [F; COLUMNS], + ) -> ProofEvaluations, COLUMNS> { let pt = |curr, next| PointEvaluations { zeta: curr, zeta_omega: next, }; ProofEvaluations { - w: curr.iter().zip(next).map(|(c, n)| pt(*c, n)).collect(), + public: Some(pt(F::zero(), F::zero())), + w: array::from_fn(|i| pt(curr[i], next[i])), z: pt(F::zero(), F::zero()), s: array::from_fn(|_| pt(F::zero(), F::zero())), - coefficients: vec![pt(F::zero(), F::zero()); W], - lookup: None, + coefficients: [pt(F::zero(), F::zero()); COLUMNS], generic_selector: pt(F::zero(), F::zero()), poseidon_selector: pt(F::zero(), F::zero()), + complete_add_selector: pt(F::zero(), F::zero()), + mul_selector: pt(F::zero(), F::zero()), + emul_selector: pt(F::zero(), F::zero()), + endomul_scalar_selector: pt(F::zero(), F::zero()), + range_check0_selector: None, + range_check1_selector: None, + foreign_field_add_selector: None, + foreign_field_mul_selector: None, + xor_selector: None, + rot_selector: None, + keccak_round_selector: None, + keccak_sponge_selector: None, + lookup_aggregation: None, + lookup_table: None, + lookup_sorted: 
array::from_fn(|_| None), + runtime_lookup_table: None, + runtime_lookup_table_selector: None, + xor_lookup_selector: None, + lookup_gate_lookup_selector: None, + range_check_lookup_selector: None, + foreign_field_mul_lookup_selector: None, + //keccak_round_lookup_selector: None, + //keccak_sponge_lookup_selector: None, } } } -impl ProofEvaluations>> { - pub fn combine(&self, pt: &PointEvaluations) -> ProofEvaluations> { +impl ProofEvaluations>, COLUMNS> { + pub fn combine( + &self, + pt: &PointEvaluations, + ) -> ProofEvaluations, COLUMNS> { self.map_ref(&|evals| PointEvaluations { zeta: DensePolynomial::eval_polynomial(&evals.zeta, pt.zeta), zeta_omega: DensePolynomial::eval_polynomial(&evals.zeta_omega, pt.zeta_omega), @@ -312,19 +447,47 @@ impl ProofEvaluations>> } } -impl ProofEvaluations { +impl ProofEvaluations { pub fn get_column(&self, col: Column) -> Option<&F> { match col { Column::Witness(i) => Some(&self.w[i]), Column::Z => Some(&self.z), - Column::LookupSorted(i) => Some(&self.lookup.as_ref()?.sorted[i]), - Column::LookupAggreg => Some(&self.lookup.as_ref()?.aggreg), - Column::LookupTable => Some(&self.lookup.as_ref()?.table), - Column::LookupKindIndex(_) => None, - Column::LookupRuntimeSelector => None, - Column::LookupRuntimeTable => Some(self.lookup.as_ref()?.runtime.as_ref()?), + Column::LookupSorted(i) => self.lookup_sorted[i].as_ref(), + Column::LookupAggreg => self.lookup_aggregation.as_ref(), + Column::LookupTable => self.lookup_table.as_ref(), + Column::LookupKindIndex(LookupPattern::Lookup) => { + self.lookup_gate_lookup_selector.as_ref() + } + Column::LookupKindIndex(LookupPattern::Xor) => self.xor_lookup_selector.as_ref(), + Column::LookupKindIndex(LookupPattern::RangeCheck) => { + self.range_check_lookup_selector.as_ref() + } + Column::LookupKindIndex(LookupPattern::ForeignFieldMul) => { + self.foreign_field_mul_lookup_selector.as_ref() + } + /* + Column::LookupKindIndex(LookupPattern::KeccakRound) => { + 
self.keccak_round_lookup_selector.as_ref() + } + Column::LookupKindIndex(LookupPattern::KeccakSponge) => { + self.keccak_sponge_lookup_selector.as_ref() + }*/ + Column::LookupRuntimeSelector => self.runtime_lookup_table_selector.as_ref(), + Column::LookupRuntimeTable => self.runtime_lookup_table.as_ref(), Column::Index(GateType::Generic) => Some(&self.generic_selector), Column::Index(GateType::Poseidon) => Some(&self.poseidon_selector), + Column::Index(GateType::CompleteAdd) => Some(&self.complete_add_selector), + Column::Index(GateType::VarBaseMul) => Some(&self.mul_selector), + Column::Index(GateType::EndoMul) => Some(&self.emul_selector), + Column::Index(GateType::EndoMulScalar) => Some(&self.endomul_scalar_selector), + Column::Index(GateType::RangeCheck0) => self.range_check0_selector.as_ref(), + Column::Index(GateType::RangeCheck1) => self.range_check1_selector.as_ref(), + Column::Index(GateType::ForeignFieldAdd) => self.foreign_field_add_selector.as_ref(), + Column::Index(GateType::ForeignFieldMul) => self.foreign_field_mul_selector.as_ref(), + Column::Index(GateType::Xor16) => self.xor_selector.as_ref(), + Column::Index(GateType::Rot64) => self.rot_selector.as_ref(), + Column::Index(GateType::KeccakRound) => self.keccak_round_selector.as_ref(), + Column::Index(GateType::KeccakSponge) => self.keccak_sponge_selector.as_ref(), Column::Index(_) => None, Column::Coefficient(i) => Some(&self.coefficients[i]), Column::Permutation(i) => Some(&self.s[i]), @@ -339,7 +502,7 @@ impl ProofEvaluations { #[cfg(feature = "ocaml_types")] pub mod caml { use super::*; - use crate::circuits::wires::COLUMNS; + use crate::circuits::wires::KIMCHI_COLS; use poly_commitment::commitment::caml::CamlPolyComm; // @@ -383,59 +546,6 @@ pub mod caml { } } - // - // CamlLookupEvaluations - // - - #[derive(Clone, ocaml::IntoValue, ocaml::FromValue, ocaml_gen::Struct)] - pub struct CamlLookupEvaluations { - pub sorted: Vec>>, - pub aggreg: PointEvaluations>, - pub table: PointEvaluations>, - 
pub runtime: Option>>, - } - - impl From>>> for CamlLookupEvaluations - where - F: Clone, - CamlF: From, - { - fn from(le: LookupEvaluations>>) -> Self { - Self { - sorted: le - .sorted - .into_iter() - .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())) - .collect(), - aggreg: le.aggreg.map(&|x| x.into_iter().map(Into::into).collect()), - table: le.table.map(&|x| x.into_iter().map(Into::into).collect()), - runtime: le - .runtime - .map(|r| r.map(&|r| r.into_iter().map(Into::into).collect())), - } - } - } - - impl From> for LookupEvaluations>> - where - F: From + Clone, - { - fn from(pe: CamlLookupEvaluations) -> Self { - Self { - sorted: pe - .sorted - .into_iter() - .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())) - .collect(), - aggreg: pe.aggreg.map(&|x| x.into_iter().map(Into::into).collect()), - table: pe.table.map(&|x| x.into_iter().map(Into::into).collect()), - runtime: pe - .runtime - .map(|r| r.map(&|r| r.into_iter().map(Into::into).collect())), - } - } - } - // // CamlProofEvaluations // @@ -486,23 +596,46 @@ pub mod caml { PointEvaluations>, PointEvaluations>, ), - pub lookup: Option>, pub generic_selector: PointEvaluations>, pub poseidon_selector: PointEvaluations>, + pub complete_add_selector: PointEvaluations>, + pub mul_selector: PointEvaluations>, + pub emul_selector: PointEvaluations>, + pub endomul_scalar_selector: PointEvaluations>, + + pub range_check0_selector: Option>>, + pub range_check1_selector: Option>>, + pub foreign_field_add_selector: Option>>, + pub foreign_field_mul_selector: Option>>, + pub xor_selector: Option>>, + pub rot_selector: Option>>, + pub lookup_aggregation: Option>>, + pub lookup_table: Option>>, + pub lookup_sorted: Vec>>>, + pub runtime_lookup_table: Option>>, + + pub runtime_lookup_table_selector: Option>>, + pub xor_lookup_selector: Option>>, + pub lookup_gate_lookup_selector: Option>>, + pub range_check_lookup_selector: Option>>, + pub foreign_field_mul_lookup_selector: Option>>, } // // 
ProofEvaluations> <-> CamlProofEvaluations // - impl From>>> - for CamlProofEvaluations + impl From>, KIMCHI_COLS>> + for ( + Option>>, + CamlProofEvaluations, + ) where F: Clone, CamlF: From, { - fn from(pe: ProofEvaluations>>) -> Self { + fn from(pe: ProofEvaluations>, KIMCHI_COLS>) -> Self { let w = ( pe.w[0] .clone() @@ -618,30 +751,105 @@ pub mod caml { .map(&|x| x.into_iter().map(Into::into).collect()), ); - Self { - w, - coefficients, - z: pe.z.map(&|x| x.into_iter().map(Into::into).collect()), - s, - generic_selector: pe - .generic_selector - .map(&|x| x.into_iter().map(Into::into).collect()), - poseidon_selector: pe - .poseidon_selector - .map(&|x| x.into_iter().map(Into::into).collect()), - lookup: pe.lookup.map(Into::into), - } + ( + pe.public + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + CamlProofEvaluations { + w, + coefficients, + z: pe.z.map(&|x| x.into_iter().map(Into::into).collect()), + s, + generic_selector: pe + .generic_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + poseidon_selector: pe + .poseidon_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + complete_add_selector: pe + .complete_add_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + mul_selector: pe + .mul_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + emul_selector: pe + .emul_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + endomul_scalar_selector: pe + .endomul_scalar_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + range_check0_selector: pe + .range_check0_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + range_check1_selector: pe + .range_check1_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + foreign_field_add_selector: pe + .foreign_field_add_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + foreign_field_mul_selector: pe + .foreign_field_mul_selector + .map(|x| x.map(&|x| 
x.into_iter().map(Into::into).collect())), + xor_selector: pe + .xor_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + rot_selector: pe + .rot_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + lookup_aggregation: pe + .lookup_aggregation + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + lookup_table: pe + .lookup_table + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + lookup_sorted: pe + .lookup_sorted + .iter() + .map(|x| { + x.as_ref().map(|x| { + x.map_ref(&|x| x.clone().into_iter().map(Into::into).collect()) + }) + }) + .collect::>(), + runtime_lookup_table: pe + .runtime_lookup_table + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + runtime_lookup_table_selector: pe + .runtime_lookup_table_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + xor_lookup_selector: pe + .xor_lookup_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + lookup_gate_lookup_selector: pe + .lookup_gate_lookup_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + range_check_lookup_selector: pe + .range_check_lookup_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + foreign_field_mul_lookup_selector: pe + .foreign_field_mul_lookup_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + }, + ) } } - impl From> - for ProofEvaluations>> + impl + From<( + Option>>, + CamlProofEvaluations, + )> for ProofEvaluations>, KIMCHI_COLS> where F: Clone, + CamlF: Clone, F: From, { - fn from(cpe: CamlProofEvaluations) -> Self { - let w = vec![ + fn from( + (public, cpe): ( + Option>>, + CamlProofEvaluations, + ), + ) -> Self { + let w = [ cpe.w.0.map(&|x| x.into_iter().map(Into::into).collect()), cpe.w.1.map(&|x| x.into_iter().map(Into::into).collect()), cpe.w.2.map(&|x| x.into_iter().map(Into::into).collect()), @@ -658,7 +866,7 @@ pub mod caml { cpe.w.13.map(&|x| 
x.into_iter().map(Into::into).collect()), cpe.w.14.map(&|x| x.into_iter().map(Into::into).collect()), ]; - let coefficients = vec![ + let coefficients = [ cpe.coefficients .0 .map(&|x| x.into_iter().map(Into::into).collect()), @@ -715,6 +923,7 @@ pub mod caml { ]; Self { + public: public.map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), w, coefficients, z: cpe.z.map(&|x| x.into_iter().map(Into::into).collect()), @@ -725,7 +934,72 @@ pub mod caml { poseidon_selector: cpe .poseidon_selector .map(&|x| x.into_iter().map(Into::into).collect()), - lookup: cpe.lookup.map(Into::into), + complete_add_selector: cpe + .complete_add_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + mul_selector: cpe + .mul_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + emul_selector: cpe + .emul_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + endomul_scalar_selector: cpe + .endomul_scalar_selector + .map(&|x| x.into_iter().map(Into::into).collect()), + range_check0_selector: cpe + .range_check0_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + range_check1_selector: cpe + .range_check1_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + foreign_field_add_selector: cpe + .foreign_field_add_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + foreign_field_mul_selector: cpe + .foreign_field_mul_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + xor_selector: cpe + .xor_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + rot_selector: cpe + .rot_selector + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + keccak_round_selector: None, + keccak_sponge_selector: None, + lookup_aggregation: cpe + .lookup_aggregation + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + lookup_table: cpe + .lookup_table + .map(|x| x.map(&|x| x.into_iter().map(Into::into).collect())), + lookup_sorted: { + 
assert_eq!(cpe.lookup_sorted.len(), 5); // Invalid proof + array::from_fn(|i| { + cpe.lookup_sorted[i] + .as_ref() + .map(|x| x.clone().map(&|x| x.into_iter().map(Into::into).collect())) + }) + }, + runtime_lookup_table: cpe + .runtime_lookup_table + .map(|x| x.map(&|x| x.iter().map(|x| x.clone().into()).collect())), + runtime_lookup_table_selector: cpe + .runtime_lookup_table_selector + .map(|x| x.map(&|x| x.iter().map(|x| x.clone().into()).collect())), + xor_lookup_selector: cpe + .xor_lookup_selector + .map(|x| x.map(&|x| x.iter().map(|x| x.clone().into()).collect())), + lookup_gate_lookup_selector: cpe + .lookup_gate_lookup_selector + .map(|x| x.map(&|x| x.iter().map(|x| x.clone().into()).collect())), + range_check_lookup_selector: cpe + .range_check_lookup_selector + .map(|x| x.map(&|x| x.iter().map(|x| x.clone().into()).collect())), + foreign_field_mul_lookup_selector: cpe + .foreign_field_mul_lookup_selector + .map(|x| x.map(&|x| x.iter().map(|x| x.clone().into()).collect())), + keccak_round_lookup_selector: None, + keccak_sponge_lookup_selector: None, } } } diff --git a/kimchi/src/prover.rs b/kimchi/src/prover.rs index aeb7fd5814..63d783dff6 100644 --- a/kimchi/src/prover.rs +++ b/kimchi/src/prover.rs @@ -3,6 +3,7 @@ use crate::{ circuits::{ argument::{Argument, ArgumentType}, + constraints::zk_rows_strict_lower_bound, expr::{self, l0_1, Constants, Environment, LookupEnvironment}, gate::GateType, lookup::{self, runtime_tables::RuntimeTable, tables::combine_table_entry}, @@ -15,7 +16,6 @@ use crate::{ generic, keccak::circuitgates::{KeccakRound, KeccakSponge}, permutation, - permutation::ZK_ROWS, poseidon::Poseidon, range_check::circuitgates::{RangeCheck0, RangeCheck1}, rot::Rot64, @@ -29,12 +29,12 @@ use crate::{ lagrange_basis_evaluations::LagrangeBasisEvaluations, plonk_sponge::FrSponge, proof::{ - LookupCommitments, LookupEvaluations, PointEvaluations, ProofEvaluations, - ProverCommitments, ProverProof, RecursionChallenge, + LookupCommitments, 
PointEvaluations, ProofEvaluations, ProverCommitments, ProverProof, + RecursionChallenge, }, prover_index::ProverIndex, + verifier_index::VerifierIndex, }; -use ark_ec::ProjectiveCurve; use ark_ff::{FftField, Field, One, PrimeField, UniformRand, Zero}; use ark_poly::{ univariate::DensePolynomial, EvaluationDomain, Evaluations, Polynomial, @@ -48,6 +48,7 @@ use poly_commitment::{ absorb_commitment, b_poly_coefficients, BlindedCommitment, CommitmentCurve, PolyComm, }, evaluation_proof::DensePolynomialOrEvaluations, + OpenProof, SRS as _, }; use rayon::prelude::*; use std::array; @@ -106,8 +107,15 @@ where aggreg_comm: Option>, aggreg8: Option>>, - /// The evaluations of the aggregation polynomial for the proof - eval: Option>>>, + // lookup-related evaluations + /// evaluation of lookup aggregation polynomial + pub lookup_aggregation_eval: Option>>, + /// evaluation of lookup table polynomial + pub lookup_table_eval: Option>>, + /// evaluation of lookup sorted polynomials + pub lookup_sorted_eval: [Option>>; 5], + /// evaluation of runtime lookup table polynomial + pub runtime_lookup_table_eval: Option>>, /// Runtime table runtime_table: Option>, @@ -116,7 +124,8 @@ where runtime_second_col_d8: Option>>, } -impl ProverProof +impl, const COLUMNS: usize> + ProverProof where G::BaseField: PrimeField, { @@ -127,13 +136,16 @@ where /// Will give error if `create_recursive` process fails. pub fn create< EFqSponge: Clone + FqSponge, - EFrSponge: FrSponge, + EFrSponge: FrSponge, >( groupmap: &G::Map, - witness: [Vec; W], + witness: [Vec; COLUMNS], runtime_tables: &[RuntimeTable], - index: &ProverIndex, - ) -> Result { + index: &ProverIndex, + ) -> Result + where + VerifierIndex: Clone, + { Self::create_recursive::( groupmap, witness, @@ -155,25 +167,29 @@ where /// Will panic if `lookup_context.joint_lookup_table_d8` is None. 
pub fn create_recursive< EFqSponge: Clone + FqSponge, - EFrSponge: FrSponge, + EFrSponge: FrSponge, >( group_map: &G::Map, - mut witness: [Vec; W], + mut witness: [Vec; COLUMNS], runtime_tables: &[RuntimeTable], - index: &ProverIndex, + index: &ProverIndex, prev_challenges: Vec>, - blinders: Option<[Option>; W]>, - ) -> Result { + blinders: Option<[Option>; COLUMNS]>, + ) -> Result + where + VerifierIndex: Clone, + { internal_tracing::checkpoint!(internal_traces; create_recursive); - - // make sure that the SRS is not smaller than the domain size let d1_size = index.cs.domain.d1.size(); - if index.srs.max_degree() < d1_size { - return Err(ProverError::SRSTooSmall); - } let (_, endo_r) = G::endos(); + let num_chunks = if d1_size < index.max_poly_size { + 1 + } else { + d1_size / index.max_poly_size + }; + // TODO: rng should be passed as arg let rng = &mut rand::rngs::OsRng; @@ -187,19 +203,27 @@ where //~ 1. Ensure we have room in the witness for the zero-knowledge rows. //~ We currently expect the witness not to be of the same length as the domain, //~ but instead be of the length of the (smaller) circuit. - //~ If we cannot add `ZK_ROWS` rows to the columns of the witness before reaching + //~ If we cannot add `zk_rows` rows to the columns of the witness before reaching //~ the size of the domain, abort. let length_witness = witness[0].len(); let length_padding = d1_size .checked_sub(length_witness) .ok_or(ProverError::NoRoomForZkInWitness)?; - if length_padding < ZK_ROWS as usize { + let zero_knowledge_limit = zk_rows_strict_lower_bound(num_chunks); + if (index.cs.zk_rows as usize) < zero_knowledge_limit { + return Err(ProverError::NotZeroKnowledge( + zero_knowledge_limit, + index.cs.zk_rows as usize, + )); + } + + if length_padding < index.cs.zk_rows as usize { return Err(ProverError::NoRoomForZkInWitness); } //~ 1. Pad the witness columns with Zero gates to make them the same length as the domain. - //~ Then, randomize the last `ZK_ROWS` of each columns. 
+ //~ Then, randomize the last `zk_rows` of each columns. internal_tracing::checkpoint!(internal_traces; pad_witness); for w in &mut witness { if w.len() != length_witness { @@ -210,14 +234,14 @@ where w.extend(std::iter::repeat(G::ScalarField::zero()).take(length_padding)); // zk-rows - for row in w.iter_mut().rev().take(ZK_ROWS as usize) { + for row in w.iter_mut().rev().take(index.cs.zk_rows as usize) { *row = ::rand(rng); } } //~ 1. Setup the Fq-Sponge. internal_tracing::checkpoint!(internal_traces; set_up_fq_sponge); - let mut fq_sponge = EFqSponge::new(G::OtherCurve::sponge_params()); + let mut fq_sponge = EFqSponge::new(G::other_curve_sponge_params()); //~ 1. Absorb the digest of the VerifierIndex. let verifier_index_digest = index.verifier_index_digest::(); @@ -239,7 +263,7 @@ where .interpolate(); //~ 1. Commit (non-hiding) to the negated public input polynomial. - let public_comm = index.srs.commit_non_hiding(&public_poly, None); + let public_comm = index.srs.commit_non_hiding(&public_poly, num_chunks, None); let public_comm = { index .srs @@ -258,13 +282,13 @@ where //~ This is why we need to absorb the commitment to the public polynomial at this point. absorb_commitment(&mut fq_sponge, &public_comm); - //~ 1. Commit to the witness columns by creating `COLUMNS` hidding commitments. + //~ 1. Commit to the witness columns by creating `KIMCHI_COLS` hidding commitments. //~ //~ Note: since the witness is in evaluation form, //~ we can use the `commit_evaluation` optimization. 
internal_tracing::checkpoint!(internal_traces; commit_to_witness_columns); let mut w_comm = vec![]; - for col in 0..W { + for col in 0..COLUMNS { // witness coeff -> witness eval let witness_eval = Evaluations::>::from_vec_and_domain( @@ -293,7 +317,7 @@ where w_comm.push(com); } - let w_comm: [BlindedCommitment; W] = w_comm + let w_comm: [BlindedCommitment; COLUMNS] = w_comm .try_into() .expect("previous loop is of the correct length"); @@ -302,11 +326,11 @@ where .iter() .for_each(|c| absorb_commitment(&mut fq_sponge, &c.commitment)); - //~ 1. Compute the witness polynomials by interpolating each `COLUMNS` of the witness. + //~ 1. Compute the witness polynomials by interpolating each `KIMCHI_COLS` of the witness. //~ As mentioned above, we commit using the evaluations form rather than the coefficients //~ form so we can take advantage of the sparsity of the evaluations (i.e., there are many //~ 0 entries and entries that have less-than-full-size field elemnts.) - let witness_poly: [DensePolynomial; W] = array::from_fn(|i| { + let witness_poly: [DensePolynomial; COLUMNS] = array::from_fn(|i| { Evaluations::>::from_vec_and_domain( witness[i].clone(), index.cs.domain.d1, @@ -353,7 +377,7 @@ where } // zero-knowledge - for e in evals.iter_mut().rev().take(ZK_ROWS as usize) { + for e in evals.iter_mut().rev().take(index.cs.zk_rows as usize) { *e = ::rand(rng); } @@ -369,7 +393,10 @@ where // commit the runtime polynomial // (and save it to the proof) - let runtime_table_comm = index.srs.commit(&runtime_table_contribution, None, rng); + let runtime_table_comm = + index + .srs + .commit(&runtime_table_contribution, num_chunks, None, rng); // absorb the commitment absorb_commitment(&mut fq_sponge, &runtime_table_comm.commitment); @@ -487,13 +514,21 @@ where joint_combiner, table_id_combiner, &lcs.configuration.lookup_info, + index.cs.zk_rows as usize, )?; //~~ * Randomize the last `EVALS` rows in each of the sorted polynomials //~~ in order to add zero-knowledge to the 
protocol. let sorted: Vec<_> = sorted .into_iter() - .map(|chunk| lookup::constraints::zk_patch(chunk, index.cs.domain.d1, rng)) + .map(|chunk| { + lookup::constraints::zk_patch( + chunk, + index.cs.domain.d1, + index.cs.zk_rows as usize, + rng, + ) + }) .collect(); //~~ * Commit each of the sorted polynomials. @@ -535,7 +570,7 @@ where //~~ * Compute the lookup aggregation polynomial. let joint_lookup_table_d8 = lookup_context.joint_lookup_table_d8.as_ref().unwrap(); - let aggreg = lookup::constraints::aggregation::( + let aggreg = lookup::constraints::aggregation::<_, G::ScalarField, COLUMNS>( lookup_context.dummy_lookup_value.unwrap(), joint_lookup_table_d8, index.cs.domain.d1, @@ -548,6 +583,7 @@ where lookup_context.sorted.as_ref().unwrap(), rng, &lcs.configuration.lookup_info, + index.cs.zk_rows as usize, )?; //~~ * Commit to the aggregation polynomial. @@ -574,7 +610,7 @@ where let z_poly = index.perm_aggreg(&witness, &beta, &gamma, rng)?; //~ 1. Commit (hidding) to the permutation aggregation polynomial $z$. - let z_comm = index.srs.commit(&z_poly, None, rng); + let z_comm = index.srs.commit(&z_poly, num_chunks, None, rng); //~ 1. Absorb the permutation aggregation polynomial $z$ with the Fq-Sponge. absorb_commitment(&mut fq_sponge, &z_comm.commitment); @@ -685,10 +721,14 @@ where joint_combiner: lookup_context.joint_combiner, endo_coefficient: index.cs.endo, mds, + zk_rows: index.cs.zk_rows, }, witness: &lagrange.d8.this.w, coefficient: &index.column_evaluations.coefficients8, - vanishes_on_last_4_rows: &index.cs.precomputations().vanishes_on_last_4_rows, + vanishes_on_zero_knowledge_and_previous_rows: &index + .cs + .precomputations() + .vanishes_on_zero_knowledge_and_previous_rows, z: &lagrange.d8.this.z, l0_1: l0_1(index.cs.domain.d1), domain: index.cs.domain, @@ -845,25 +885,7 @@ where }; //~ 1. 
commit (hiding) to the quotient polynomial $t$ - //~ TODO: specify the dummies - let t_comm = { - let mut t_comm = index.srs.commit("ient_poly, None, rng); - - let expected_t_size = PERMUTS; - let dummies = expected_t_size - t_comm.commitment.unshifted.len(); - // Add `dummies` many hiding commitments to the 0 polynomial, since if the - // number of commitments in `t_comm` is less than the max size, it means that - // the higher degree coefficients of `t` are 0. - for _ in 0..dummies { - let w = ::rand(rng); - t_comm - .commitment - .unshifted - .push(index.srs.h.mul(w).into_affine()); - t_comm.blinders.unshifted.push(w); - } - t_comm - }; + let t_comm = { index.srs.commit("ient_poly, 7 * num_chunks, None, rng) }; //~ 1. Absorb the the commitment of the quotient polynomial with the Fq-Sponge. absorb_commitment(&mut fq_sponge, &t_comm.commitment); @@ -884,7 +906,7 @@ where .aggreg_coeffs .as_ref() .unwrap() - .to_chunked_polynomial(index.max_poly_size); + .to_chunked_polynomial(num_chunks, index.max_poly_size); //~~ * the sorted polynomials let sorted = lookup_context @@ -892,35 +914,41 @@ where .as_ref() .unwrap() .iter() - .map(|c| c.to_chunked_polynomial(index.max_poly_size)); + .map(|c| c.to_chunked_polynomial(num_chunks, index.max_poly_size)) + .collect::>(); //~~ * the table polynonial let joint_table = lookup_context.joint_lookup_table.as_ref().unwrap(); - let joint_table = joint_table.to_chunked_polynomial(index.max_poly_size); + let joint_table = joint_table.to_chunked_polynomial(num_chunks, index.max_poly_size); - lookup_context.eval = Some(LookupEvaluations { - aggreg: PointEvaluations { - zeta: aggreg.evaluate_chunks(zeta), - zeta_omega: aggreg.evaluate_chunks(zeta_omega), - }, - sorted: sorted - .map(|sorted| PointEvaluations { + lookup_context.lookup_aggregation_eval = Some(PointEvaluations { + zeta: aggreg.evaluate_chunks(zeta), + zeta_omega: aggreg.evaluate_chunks(zeta_omega), + }); + lookup_context.lookup_table_eval = Some(PointEvaluations { + zeta: 
joint_table.evaluate_chunks(zeta), + zeta_omega: joint_table.evaluate_chunks(zeta_omega), + }); + lookup_context.lookup_sorted_eval = array::from_fn(|i| { + if i < sorted.len() { + let sorted = &sorted[i]; + Some(PointEvaluations { zeta: sorted.evaluate_chunks(zeta), zeta_omega: sorted.evaluate_chunks(zeta_omega), }) - .collect(), - table: PointEvaluations { - zeta: joint_table.evaluate_chunks(zeta), - zeta_omega: joint_table.evaluate_chunks(zeta_omega), - }, - runtime: lookup_context.runtime_table.as_ref().map(|runtime_table| { - let runtime_table = runtime_table.to_chunked_polynomial(index.max_poly_size); + } else { + None + } + }); + lookup_context.runtime_lookup_table_eval = + lookup_context.runtime_table.as_ref().map(|runtime_table| { + let runtime_table = + runtime_table.to_chunked_polynomial(num_chunks, index.max_poly_size); PointEvaluations { zeta: runtime_table.evaluate_chunks(zeta), zeta_omega: runtime_table.evaluate_chunks(zeta_omega), } - }), - }) + }); } //~ 1. Chunk evaluate the following polynomials at both $\zeta$ and $\zeta \omega$: @@ -943,62 +971,171 @@ where //~ TODO: do we want to specify more on that? 
It seems unecessary except for the t polynomial (or if for some reason someone sets that to a low value) internal_tracing::checkpoint!(internal_traces; lagrange_basis_eval_zeta_poly); - let zeta_evals = LagrangeBasisEvaluations::new(index.cs.domain.d1, zeta); + let zeta_evals = + LagrangeBasisEvaluations::new(index.max_poly_size, index.cs.domain.d1, zeta); internal_tracing::checkpoint!(internal_traces; lagrange_basis_eval_zeta_omega_poly); - - let zeta_omega_evals = LagrangeBasisEvaluations::new(index.cs.domain.d1, zeta_omega); + let zeta_omega_evals = + LagrangeBasisEvaluations::new(index.max_poly_size, index.cs.domain.d1, zeta_omega); let chunked_evals_for_selector = |p: &Evaluations>| PointEvaluations { - zeta: vec![zeta_evals.evaluate_boolean(p)], - zeta_omega: vec![zeta_omega_evals.evaluate_boolean(p)], + zeta: zeta_evals.evaluate_boolean(p), + zeta_omega: zeta_omega_evals.evaluate_boolean(p), }; let chunked_evals_for_evaluations = |p: &Evaluations>| PointEvaluations { - zeta: vec![zeta_evals.evaluate(p)], - zeta_omega: vec![zeta_omega_evals.evaluate(p)], + zeta: zeta_evals.evaluate(p), + zeta_omega: zeta_omega_evals.evaluate(p), }; internal_tracing::checkpoint!(internal_traces; chunk_eval_zeta_omega_poly); - let chunked_evals = ProofEvaluations::>> { + let chunked_evals = ProofEvaluations::>, COLUMNS> { + public: { + let chunked = public_poly.to_chunked_polynomial(num_chunks, index.max_poly_size); + Some(PointEvaluations { + zeta: chunked.evaluate_chunks(zeta), + zeta_omega: chunked.evaluate_chunks(zeta_omega), + }) + }, s: array::from_fn(|i| { chunked_evals_for_evaluations( &index.column_evaluations.permutation_coefficients8[i], ) }), - coefficients: index - .column_evaluations - .coefficients8 - .iter() - .map(|c| chunked_evals_for_evaluations(c)) - .collect(), - w: witness_poly - .iter() - .map(|w| { - let chunked = w.to_chunked_polynomial(index.max_poly_size); - PointEvaluations { - zeta: chunked.evaluate_chunks(zeta), - zeta_omega: 
chunked.evaluate_chunks(zeta_omega), - } - }) - .collect(), + coefficients: array::from_fn(|i| { + chunked_evals_for_evaluations(&index.column_evaluations.coefficients8[i]) + }), + w: array::from_fn(|i| { + let chunked = + witness_poly[i].to_chunked_polynomial(num_chunks, index.max_poly_size); + PointEvaluations { + zeta: chunked.evaluate_chunks(zeta), + zeta_omega: chunked.evaluate_chunks(zeta_omega), + } + }), z: { - let chunked = z_poly.to_chunked_polynomial(index.max_poly_size); + let chunked = z_poly.to_chunked_polynomial(num_chunks, index.max_poly_size); PointEvaluations { zeta: chunked.evaluate_chunks(zeta), zeta_omega: chunked.evaluate_chunks(zeta_omega), } }, - lookup: lookup_context.eval.take(), + lookup_aggregation: lookup_context.lookup_aggregation_eval.take(), + lookup_table: lookup_context.lookup_table_eval.take(), + lookup_sorted: array::from_fn(|i| lookup_context.lookup_sorted_eval[i].take()), + runtime_lookup_table: lookup_context.runtime_lookup_table_eval.take(), generic_selector: chunked_evals_for_selector( &index.column_evaluations.generic_selector4, ), poseidon_selector: chunked_evals_for_selector( &index.column_evaluations.poseidon_selector8, ), + complete_add_selector: chunked_evals_for_selector( + &index.column_evaluations.complete_add_selector4, + ), + mul_selector: chunked_evals_for_selector(&index.column_evaluations.mul_selector8), + emul_selector: chunked_evals_for_selector(&index.column_evaluations.emul_selector8), + endomul_scalar_selector: chunked_evals_for_selector( + &index.column_evaluations.endomul_scalar_selector8, + ), + + range_check0_selector: index + .column_evaluations + .range_check0_selector8 + .as_ref() + .map(chunked_evals_for_selector), + range_check1_selector: index + .column_evaluations + .range_check1_selector8 + .as_ref() + .map(chunked_evals_for_selector), + foreign_field_add_selector: index + .column_evaluations + .foreign_field_add_selector8 + .as_ref() + .map(chunked_evals_for_selector), + 
foreign_field_mul_selector: index + .column_evaluations + .foreign_field_mul_selector8 + .as_ref() + .map(chunked_evals_for_selector), + xor_selector: index + .column_evaluations + .xor_selector8 + .as_ref() + .map(chunked_evals_for_selector), + rot_selector: index + .column_evaluations + .rot_selector8 + .as_ref() + .map(chunked_evals_for_selector), + keccak_round_selector: index + .column_evaluations + .keccak_round_selector8 + .as_ref() + .map(chunked_evals_for_selector), + keccak_sponge_selector: index + .column_evaluations + .keccak_sponge_selector8 + .as_ref() + .map(chunked_evals_for_selector), + + runtime_lookup_table_selector: index.cs.lookup_constraint_system.as_ref().and_then( + |lcs| { + lcs.runtime_selector + .as_ref() + .map(chunked_evals_for_selector) + }, + ), + xor_lookup_selector: index.cs.lookup_constraint_system.as_ref().and_then(|lcs| { + lcs.lookup_selectors + .xor + .as_ref() + .map(chunked_evals_for_selector) + }), + lookup_gate_lookup_selector: index.cs.lookup_constraint_system.as_ref().and_then( + |lcs| { + lcs.lookup_selectors + .lookup + .as_ref() + .map(chunked_evals_for_selector) + }, + ), + range_check_lookup_selector: index.cs.lookup_constraint_system.as_ref().and_then( + |lcs| { + lcs.lookup_selectors + .range_check + .as_ref() + .map(chunked_evals_for_selector) + }, + ), + foreign_field_mul_lookup_selector: index.cs.lookup_constraint_system.as_ref().and_then( + |lcs| { + lcs.lookup_selectors + .ffmul + .as_ref() + .map(chunked_evals_for_selector) + }, + ), + /*keccak_round_lookup_selector: index.cs.lookup_constraint_system.as_ref().and_then( + |lcs| { + lcs.lookup_selectors + .keccak_round + .as_ref() + .map(chunked_evals_for_selector) + }, + ), + keccak_sponge_lookup_selector: index.cs.lookup_constraint_system.as_ref().and_then( + |lcs| { + lcs.lookup_selectors + .keccak_sponge + .as_ref() + .map(chunked_evals_for_selector) + }, + ),*/ }; let zeta_to_srs_len = zeta.pow([index.max_poly_size as u64]); @@ -1040,12 +1177,12 @@ where 
drop(env); // see https://o1-labs.github.io/mina-book/crypto/plonk/maller_15.html#the-prover-side - f.to_chunked_polynomial(index.max_poly_size) + f.to_chunked_polynomial(num_chunks, index.max_poly_size) .linearize(zeta_to_srs_len) }; let t_chunked = quotient_poly - .to_chunked_polynomial(index.max_poly_size) + .to_chunked_polynomial(7 * num_chunks, index.max_poly_size) .linearize(zeta_to_srs_len); &f_chunked - &t_chunked.scale(zeta_to_domain_size - G::ScalarField::one()) @@ -1101,16 +1238,6 @@ where }) .collect::>(); - //~ 1. Evaluate the negated public polynomial (if present) at $\zeta$ and $\zeta\omega$. - let public_evals = if public_poly.is_zero() { - [vec![G::ScalarField::zero()], vec![G::ScalarField::zero()]] - } else { - [ - vec![public_poly.evaluate(&zeta)], - vec![public_poly.evaluate(&zeta_omega)], - ] - }; - //~ 1. Absorb the unique evaluation of ft: $ft(\zeta\omega)$. fr_sponge.absorb(&ft_eval1); @@ -1121,8 +1248,8 @@ where //~~ * poseidon selector //~~ * the 15 register/witness //~~ * 6 sigmas evaluations (the last one is not evaluated) - fr_sponge.absorb_multiple(&public_evals[0]); - fr_sponge.absorb_multiple(&public_evals[1]); + fr_sponge.absorb_multiple(&chunked_evals.public.as_ref().unwrap().zeta); + fr_sponge.absorb_multiple(&chunked_evals.public.as_ref().unwrap().zeta_omega); fr_sponge.absorb_evaluations(&chunked_evals); //~ 1. 
Sample $v'$ with the Fr-Sponge @@ -1166,19 +1293,42 @@ where //~~ * the poseidon selector //~~ * the 15 registers/witness columns //~~ * the 6 sigmas - //~~ * optionally, the runtime table - polynomials.push((coefficients_form(&public_poly), None, fixed_hiding(1))); + polynomials.push(( + coefficients_form(&public_poly), + None, + fixed_hiding(num_chunks), + )); polynomials.push((coefficients_form(&ft), None, blinding_ft)); polynomials.push((coefficients_form(&z_poly), None, z_comm.blinders)); polynomials.push(( evaluations_form(&index.column_evaluations.generic_selector4), None, - fixed_hiding(1), + fixed_hiding(num_chunks), )); polynomials.push(( evaluations_form(&index.column_evaluations.poseidon_selector8), None, - fixed_hiding(1), + fixed_hiding(num_chunks), + )); + polynomials.push(( + evaluations_form(&index.column_evaluations.complete_add_selector4), + None, + fixed_hiding(num_chunks), + )); + polynomials.push(( + evaluations_form(&index.column_evaluations.mul_selector8), + None, + fixed_hiding(num_chunks), + )); + polynomials.push(( + evaluations_form(&index.column_evaluations.emul_selector8), + None, + fixed_hiding(num_chunks), + )); + polynomials.push(( + evaluations_form(&index.column_evaluations.endomul_scalar_selector8), + None, + fixed_hiding(num_chunks), )); polynomials.extend( witness_poly @@ -1192,16 +1342,73 @@ where .column_evaluations .coefficients8 .iter() - .map(|coefficientm| (evaluations_form(coefficientm), None, non_hiding(1))) + .map(|coefficientm| (evaluations_form(coefficientm), None, non_hiding(num_chunks))) .collect::>(), ); polynomials.extend( index.column_evaluations.permutation_coefficients8[0..PERMUTS - 1] .iter() - .map(|w| (evaluations_form(w), None, non_hiding(1))) + .map(|w| (evaluations_form(w), None, non_hiding(num_chunks))) .collect::>(), ); + //~~ * the optional gates + if let Some(range_check0_selector8) = + index.column_evaluations.range_check0_selector8.as_ref() + { + polynomials.push(( + 
evaluations_form(range_check0_selector8), + None, + non_hiding(num_chunks), + )); + } + if let Some(range_check1_selector8) = + index.column_evaluations.range_check1_selector8.as_ref() + { + polynomials.push(( + evaluations_form(range_check1_selector8), + None, + non_hiding(num_chunks), + )); + } + if let Some(foreign_field_add_selector8) = index + .column_evaluations + .foreign_field_add_selector8 + .as_ref() + { + polynomials.push(( + evaluations_form(foreign_field_add_selector8), + None, + non_hiding(num_chunks), + )); + } + if let Some(foreign_field_mul_selector8) = index + .column_evaluations + .foreign_field_mul_selector8 + .as_ref() + { + polynomials.push(( + evaluations_form(foreign_field_mul_selector8), + None, + non_hiding(num_chunks), + )); + } + if let Some(xor_selector8) = index.column_evaluations.xor_selector8.as_ref() { + polynomials.push(( + evaluations_form(xor_selector8), + None, + non_hiding(num_chunks), + )); + } + if let Some(rot_selector8) = index.column_evaluations.rot_selector8.as_ref() { + polynomials.push(( + evaluations_form(rot_selector8), + None, + non_hiding(num_chunks), + )); + } + + //~~ * optionally, the runtime table //~ 1. 
if using lookup: if let Some(lcs) = &index.cs.lookup_constraint_system { //~~ * add the lookup sorted polynomials @@ -1226,14 +1433,19 @@ where let runtime_comm = lookup_context.runtime_table_comm.as_ref().unwrap(); let joint_combiner = lookup_context.joint_combiner.as_ref().unwrap(); - let blinding = runtime_comm.blinders.unshifted[0]; + let unshifted = runtime_comm + .blinders + .unshifted + .iter() + .map(|blinding| *joint_combiner * blinding) + .collect(); PolyComm { - unshifted: vec![*joint_combiner * blinding], + unshifted, shifted: None, } } else { - non_hiding(1) + non_hiding(num_chunks) }; let joint_lookup_table = lookup_context.joint_lookup_table.as_ref().unwrap(); @@ -1251,11 +1463,42 @@ where runtime_table_comm.blinders.clone(), )); } + + //~~ * the lookup selectors + + if let Some(runtime_lookup_table_selector) = lcs.runtime_selector.as_ref() { + polynomials.push(( + evaluations_form(runtime_lookup_table_selector), + None, + non_hiding(1), + )) + } + if let Some(xor_lookup_selector) = lcs.lookup_selectors.xor.as_ref() { + polynomials.push((evaluations_form(xor_lookup_selector), None, non_hiding(1))) + } + if let Some(lookup_gate_selector) = lcs.lookup_selectors.lookup.as_ref() { + polynomials.push((evaluations_form(lookup_gate_selector), None, non_hiding(1))) + } + if let Some(range_check_lookup_selector) = lcs.lookup_selectors.range_check.as_ref() { + polynomials.push(( + evaluations_form(range_check_lookup_selector), + None, + non_hiding(1), + )) + } + if let Some(foreign_field_mul_lookup_selector) = lcs.lookup_selectors.ffmul.as_ref() { + polynomials.push(( + evaluations_form(foreign_field_mul_lookup_selector), + None, + non_hiding(1), + )) + } } //~ 1. Create an aggregated evaluation proof for all of these polynomials at $\zeta$ and $\zeta\omega$ using $u$ and $v$. 
internal_tracing::checkpoint!(internal_traces; create_aggregated_evaluation_proof); - let proof = index.srs.open( + let proof = OpenProof::open( + &*index.srs, group_map, &polynomials, &[zeta, zeta_omega], @@ -1316,14 +1559,23 @@ internal_tracing::decl_traces!(internal_traces; #[cfg(feature = "ocaml_types")] pub mod caml { use super::*; - use crate::circuits::wires::COLUMNS; + use crate::circuits::wires::KIMCHI_COLS; use crate::proof::caml::{CamlProofEvaluations, CamlRecursionChallenge}; use ark_ec::AffineCurve; - use poly_commitment::commitment::caml::{CamlOpeningProof, CamlPolyComm}; + use poly_commitment::{ + commitment::caml::{CamlOpeningProof, CamlPolyComm}, + evaluation_proof::OpeningProof, + }; #[cfg(feature = "internal_tracing")] pub use internal_traces::caml::CamlTraces as CamlProverTraces; + #[derive(ocaml::IntoValue, ocaml::FromValue, ocaml_gen::Struct)] + pub struct CamlProofWithPublic { + pub public_evals: Option>>, + pub proof: CamlProverProof, + } + // // CamlProverProof // @@ -1439,12 +1691,12 @@ pub mod caml { // CamlProverCommitments <-> ProverCommitments // - impl From> for CamlProverCommitments + impl From> for CamlProverCommitments where G: AffineCurve, CamlPolyComm: From>, { - fn from(prover_comm: ProverCommitments) -> Self { + fn from(prover_comm: ProverCommitments) -> Self { Self { w_comm: ( prover_comm.w_comm[0].clone().into(), @@ -1470,12 +1722,14 @@ pub mod caml { } } - impl From> for ProverCommitments + impl From> for ProverCommitments where G: AffineCurve, PolyComm: From>, { - fn from(caml_prover_comm: CamlProverCommitments) -> ProverCommitments { + fn from( + caml_prover_comm: CamlProverCommitments, + ) -> ProverCommitments { let ( w_comm0, w_comm1, @@ -1519,41 +1773,64 @@ pub mod caml { } // - // ProverProof <-> CamlProverProof + // ProverProof <-> CamlProofWithPublic // - impl From<(ProverProof, Vec)> - for CamlProverProof + impl + From<( + ProverProof, KIMCHI_COLS>, + Vec, + )> for CamlProofWithPublic where G: AffineCurve, CamlG: 
From, CamlF: From, { - fn from(pp: (ProverProof, Vec)) -> Self { - Self { - commitments: pp.0.commitments.into(), - proof: pp.0.proof.into(), - evals: pp.0.evals.into(), - ft_eval1: pp.0.ft_eval1.into(), - public: pp.1.into_iter().map(Into::into).collect(), - prev_challenges: pp.0.prev_challenges.into_iter().map(Into::into).collect(), + fn from( + pp: ( + ProverProof, KIMCHI_COLS>, + Vec, + ), + ) -> Self { + let (public_evals, evals) = pp.0.evals.into(); + CamlProofWithPublic { + public_evals, + proof: CamlProverProof { + commitments: pp.0.commitments.into(), + proof: pp.0.proof.into(), + evals, + ft_eval1: pp.0.ft_eval1.into(), + public: pp.1.into_iter().map(Into::into).collect(), + prev_challenges: pp.0.prev_challenges.into_iter().map(Into::into).collect(), + }, } } } - impl From> - for (ProverProof, Vec) + impl From> + for ( + ProverProof, KIMCHI_COLS>, + Vec, + ) where + CamlF: Clone, G: AffineCurve + From, G::ScalarField: From, { fn from( - caml_pp: CamlProverProof, - ) -> (ProverProof, Vec) { + caml_pp: CamlProofWithPublic, + ) -> ( + ProverProof, KIMCHI_COLS>, + Vec, + ) { + let CamlProofWithPublic { + public_evals, + proof: caml_pp, + } = caml_pp; let proof = ProverProof { commitments: caml_pp.commitments.into(), proof: caml_pp.proof.into(), - evals: caml_pp.evals.into(), + evals: (public_evals, caml_pp.evals).into(), ft_eval1: caml_pp.ft_eval1.into(), prev_challenges: caml_pp .prev_challenges diff --git a/kimchi/src/prover_index.rs b/kimchi/src/prover_index.rs index 3fbc2eaf7a..edd7087951 100644 --- a/kimchi/src/prover_index.rs +++ b/kimchi/src/prover_index.rs @@ -3,16 +3,18 @@ use crate::{ alphas::Alphas, circuits::{ + berkeley_columns::Column, constraints::{ColumnEvaluations, ConstraintSystem}, expr::{Linearization, PolishToken}, + wires::KIMCHI_COLS, }, curve::KimchiCurve, linearization::expr_linearization, verifier_index::VerifierIndex, }; -use ark_poly::EvaluationDomain; +use ark_ff::PrimeField; use mina_poseidon::FqSponge; -use 
poly_commitment::srs::SRS; +use poly_commitment::{OpenProof, SRS as _}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_with::serde_as; use std::sync::Arc; @@ -21,14 +23,18 @@ use std::sync::Arc; #[serde_as] #[derive(Serialize, Deserialize, Debug, Clone)] //~spec:startcode -pub struct ProverIndex { +pub struct ProverIndex< + G: KimchiCurve, + OpeningProof: OpenProof, + const COLUMNS: usize = KIMCHI_COLS, +> { /// constraints system polynomials #[serde(bound = "ConstraintSystem: Serialize + DeserializeOwned")] pub cs: ConstraintSystem, /// The symbolic linearization of our circuit, which can compile to concrete types once certain values are learned in the protocol. #[serde(skip)] - pub linearization: Linearization>>, + pub linearization: Linearization>, Column>, /// The mapping between powers of alpha and constraints #[serde(skip)] @@ -36,17 +42,18 @@ pub struct ProverIndex { /// polynomial commitment keys #[serde(skip)] - pub srs: Arc>, + #[serde(bound(deserialize = "OpeningProof::SRS: Default"))] + pub srs: Arc, /// maximal size of polynomial section pub max_poly_size: usize, - #[serde(bound = "ColumnEvaluations: Serialize + DeserializeOwned")] - pub column_evaluations: ColumnEvaluations, + #[serde(bound = "ColumnEvaluations: Serialize + DeserializeOwned")] + pub column_evaluations: ColumnEvaluations, /// The verifier index corresponding to this prover index #[serde(skip)] - pub verifier_index: Option>, + pub verifier_index: Option>, /// The verifier index digest corresponding to this prover index #[serde_as(as = "Option")] @@ -54,29 +61,23 @@ pub struct ProverIndex { } //~spec:endcode -impl ProverIndex { +impl, const COLUMNS: usize> + ProverIndex +where + G::BaseField: PrimeField, +{ /// this function compiles the index from constraints - /// - /// # Panics - /// - /// Will panic if `polynomial segment size` is bigger than `circuit`. 
pub fn create( mut cs: ConstraintSystem, endo_q: G::ScalarField, - srs: Arc>, + srs: Arc, ) -> Self { - let max_poly_size = srs.g.len(); - if cs.public > 0 { - assert!( - max_poly_size >= cs.domain.d1.size(), - "polynomial segment size has to be not smaller than that of the circuit!" - ); - } + let max_poly_size = srs.max_poly_size(); cs.endo = endo_q; // pre-compute the linearization let (linearization, powers_of_alpha) = - expr_linearization::(Some(&cs.feature_flags), true); + expr_linearization::(Some(&cs.feature_flags), true); let evaluated_column_coefficients = cs.evaluated_column_coefficients(); @@ -100,7 +101,10 @@ impl ProverIndex { EFqSponge: Clone + FqSponge, >( &mut self, - ) -> G::BaseField { + ) -> G::BaseField + where + VerifierIndex: Clone, + { if let Some(verifier_index_digest) = self.verifier_index_digest { return verifier_index_digest; } @@ -117,7 +121,10 @@ impl ProverIndex { /// Retrieve or compute the digest for the corresponding verifier index. pub fn verifier_index_digest>( &self, - ) -> G::BaseField { + ) -> G::BaseField + where + VerifierIndex: Clone, + { if let Some(verifier_index_digest) = self.verifier_index_digest { return verifier_index_digest; } @@ -142,21 +149,25 @@ pub mod testing { precomputed_srs, }; use ark_ff::{PrimeField, SquareRootField}; - use poly_commitment::srs::endos; - - /// Create new index for lookups. - /// - /// # Panics - /// - /// Will panic if `constraint system` is not built with `gates` input. 
- pub fn new_index_for_test_with_lookups( + use ark_poly::{EvaluationDomain, Radix2EvaluationDomain as D}; + use poly_commitment::{evaluation_proof::OpeningProof, srs::SRS, OpenProof}; + + #[allow(clippy::too_many_arguments)] + pub fn new_index_for_test_with_lookups_and_custom_srs< + G: KimchiCurve, + OpeningProof: OpenProof, + const COLUMNS: usize, + F: FnMut(D, usize) -> OpeningProof::SRS, + >( gates: Vec>, public: usize, prev_challenges: usize, lookup_tables: Vec>, runtime_tables: Option>>, disable_gates_checks: bool, - ) -> ProverIndex + override_srs_size: Option, + mut get_srs: F, + ) -> ProverIndex where G::BaseField: PrimeField, G::ScalarField: PrimeField + SquareRootField, @@ -168,32 +179,68 @@ pub mod testing { .public(public) .prev_challenges(prev_challenges) .disable_gates_checks(disable_gates_checks) - .build::() + .max_poly_size(override_srs_size) + .build::() .unwrap(); - let mut srs = if cs.domain.d1.log_size_of_group <= precomputed_srs::SERIALIZED_SRS_SIZE { - // TODO: we should trim it if it's smaller - precomputed_srs::get_srs() - } else { - // TODO: we should resume the SRS generation starting from the serialized one - SRS::::create(cs.domain.d1.size()) - }; - - srs.add_lagrange_basis(cs.domain.d1); + let srs_size = override_srs_size.unwrap_or_else(|| cs.domain.d1.size()); + let srs = get_srs(cs.domain.d1, srs_size); let srs = Arc::new(srs); - let (endo_q, _endo_r) = endos::(); - ProverIndex::::create(cs, endo_q, srs) + let &endo_q = G::other_curve_endo(); + ProverIndex::create(cs, endo_q, srs) + } + + /// Create new index for lookups. + /// + /// # Panics + /// + /// Will panic if `constraint system` is not built with `gates` input. 
+ pub fn new_index_for_test_with_lookups( + gates: Vec>, + public: usize, + prev_challenges: usize, + lookup_tables: Vec>, + runtime_tables: Option>>, + disable_gates_checks: bool, + override_srs_size: Option, + ) -> ProverIndex, COLUMNS> + where + G::BaseField: PrimeField, + G::ScalarField: PrimeField + SquareRootField, + { + new_index_for_test_with_lookups_and_custom_srs( + gates, + public, + prev_challenges, + lookup_tables, + runtime_tables, + disable_gates_checks, + override_srs_size, + |d1: D, size: usize| { + let log2_size = size.ilog2(); + let mut srs = if log2_size <= precomputed_srs::SERIALIZED_SRS_SIZE { + // TODO: we should trim it if it's smaller + precomputed_srs::get_srs() + } else { + // TODO: we should resume the SRS generation starting from the serialized one + SRS::::create(size) + }; + + srs.add_lagrange_basis(d1); + srs + }, + ) } - pub fn new_index_for_test( + pub fn new_index_for_test( gates: Vec>, public: usize, - ) -> ProverIndex + ) -> ProverIndex, COLUMNS> where G::BaseField: PrimeField, G::ScalarField: PrimeField + SquareRootField, { - new_index_for_test_with_lookups::(gates, public, 0, vec![], None, false) + new_index_for_test_with_lookups::(gates, public, 0, vec![], None, false, None) } } diff --git a/kimchi/src/snarky/constants.rs b/kimchi/src/snarky/constants.rs index bf11fce2a8..2324d4ff87 100644 --- a/kimchi/src/snarky/constants.rs +++ b/kimchi/src/snarky/constants.rs @@ -1,9 +1,7 @@ //! Constants used for poseidon. 
-use ark_ec::AffineCurve; use ark_ff::Field; use mina_poseidon::poseidon::ArithmeticSpongeParams; -use poly_commitment::commitment::CommitmentCurve; use crate::curve::KimchiCurve; @@ -20,10 +18,8 @@ where { pub fn new>() -> Self { let poseidon = Curve::sponge_params().clone(); - let (endo_q, _endo_r) = Curve::OtherCurve::endos(); - let base = Curve::OtherCurve::prime_subgroup_generator() - .to_coordinates() - .unwrap(); + let endo_q = Curve::other_curve_endo(); + let base = Curve::other_curve_prime_subgroup_generator(); Self { poseidon, diff --git a/kimchi/src/snarky/constraint_system.rs b/kimchi/src/snarky/constraint_system.rs index ddf94409f8..f95bb07137 100644 --- a/kimchi/src/snarky/constraint_system.rs +++ b/kimchi/src/snarky/constraint_system.rs @@ -2,7 +2,7 @@ use crate::circuits::gate::{CircuitGate, GateType}; use crate::circuits::polynomials::poseidon::{ROUNDS_PER_HASH, SPONGE_WIDTH}; -use crate::circuits::wires::{Wire, COLUMNS, PERMUTS}; +use crate::circuits::wires::{Wire, KIMCHI_COLS, PERMUTS}; use ark_ff::PrimeField; use itertools::Itertools; use std::collections::{HashMap, HashSet}; @@ -194,7 +194,7 @@ enum V { while it is being written. */ #[derive(Clone)] -enum Circuit +enum Circuit where F: PrimeField, { @@ -225,7 +225,7 @@ where A gate is finalized once [finalize_and_get_gates](SnarkyConstraintSystem::finalize_and_get_gates) is called. The finalized tag contains the digest of the circuit. */ - gates: Circuit, + gates: Circuit, /** The row to use the next time we add a constraint. */ next_row: usize, /** The size of the public input (which fills the first rows of our constraint system. 
*/ @@ -289,7 +289,7 @@ impl SnarkyConstraintSystem { let mut internal_values = HashMap::new(); let public_input_size = self.public_input_size.unwrap(); let num_rows = public_input_size + self.next_row; - let mut res = vec![vec![Field::zero(); num_rows]; COLUMNS]; + let mut res = vec![vec![Field::zero(); num_rows]; KIMCHI_COLS]; for i in 0..public_input_size { res[0][i] = external_values(i + 1); } diff --git a/kimchi/src/tests/and.rs b/kimchi/src/tests/and.rs index 2c35d5ae2c..a1ef13afd7 100644 --- a/kimchi/src/tests/and.rs +++ b/kimchi/src/tests/and.rs @@ -2,7 +2,7 @@ use crate::{ circuits::{ constraints::ConstraintSystem, gate::{CircuitGate, CircuitGateError, GateType}, - polynomial::COLUMNS, + polynomial::KIMCHI_COLS, polynomials::{and, xor}, wires::Wire, }, @@ -49,7 +49,7 @@ where // Manually checks the AND of the witness fn check_and( - witness: &[Vec; COLUMNS], + witness: &[Vec; KIMCHI_COLS], bytes: usize, input1: G::ScalarField, input2: G::ScalarField, @@ -74,7 +74,7 @@ fn setup_and( bytes: usize, ) -> ( ConstraintSystem, - [Vec; COLUMNS], + [Vec; KIMCHI_COLS], ) where G::BaseField: PrimeField, @@ -82,7 +82,9 @@ where let rng = &mut StdRng::from_seed(RNG_SEED); let gates = create_test_gates_and::(bytes); - let cs = ConstraintSystem::create(gates).build::().unwrap(); + let cs = ConstraintSystem::create(gates) + .build::() + .unwrap(); // Initalize inputs let input1 = rng.gen(input1, Some(bytes * 8)); @@ -99,7 +101,7 @@ fn test_and( input1: Option, input2: Option, bytes: usize, -) -> [Vec; COLUMNS] +) -> [Vec; KIMCHI_COLS] where G::BaseField: PrimeField, { @@ -107,7 +109,7 @@ where for row in 0..witness[0].len() { assert_eq!( - cs.gates[row].verify_witness::( + cs.gates[row].verify_witness::( row, &witness, &cs, @@ -125,7 +127,7 @@ fn prove_and_verify(bytes: usize) where G::BaseField: PrimeField, EFqSponge: Clone + FqSponge, - EFrSponge: FrSponge, + EFrSponge: FrSponge, { let rng = &mut StdRng::from_seed(RNG_SEED); @@ -140,7 +142,7 @@ where // Create witness 
let witness = and::create_and_witness(input1, input2, bytes); - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .setup() @@ -235,13 +237,13 @@ fn test_and_overflow_one() { } fn verify_bad_and_decomposition( - witness: &mut [Vec; COLUMNS], + witness: &mut [Vec; KIMCHI_COLS], cs: ConstraintSystem, ) where G::BaseField: PrimeField, { // modify by one each of the witness cells individually - for col in 0..COLUMNS { + for col in 0..KIMCHI_COLS { // first three columns make fail the ith+1 constraint // for the rest, the first 4 make the 1st fail, the following 4 make the 2nd fail, the last 4 make the 3rd fail let bad = if col < 3 { col + 1 } else { (col - 3) / 4 + 1 }; @@ -251,7 +253,7 @@ fn verify_bad_and_decomposition( // Update copy constraints of generic gate if col < 2 { assert_eq!( - cs.gates[0].verify_witness::( + cs.gates[0].verify_witness::( 0, witness, &cs, @@ -267,7 +269,7 @@ fn verify_bad_and_decomposition( } if col == 2 { assert_eq!( - cs.gates[0].verify_witness::( + cs.gates[0].verify_witness::( 0, witness, &cs, @@ -285,7 +287,12 @@ fn verify_bad_and_decomposition( witness[4][and_row] += G::ScalarField::one(); } assert_eq!( - cs.gates[0].verify_witness::(0, witness, &cs, &witness[0][0..cs.public]), + cs.gates[0].verify_witness::( + 0, + witness, + &cs, + &witness[0][0..cs.public] + ), Err(CircuitGateError::Constraint(GateType::Xor16, bad)) ); witness[col][xor_row] -= G::ScalarField::one(); @@ -298,7 +305,7 @@ fn verify_bad_and_decomposition( } // undo changes assert_eq!( - cs.gates[0].verify_witness::(0, witness, &cs, &witness[0][0..cs.public]), + cs.gates[0].verify_witness::(0, witness, &cs, &witness[0][0..cs.public]), Ok(()) ); } @@ -328,12 +335,12 @@ fn test_bad_and() { // Corrupt the witness: modify the output to be all zero witness[2][0] = PallasField::zero(); for i in 1..=4 { - witness[COLUMNS - i][0] = PallasField::zero(); + witness[KIMCHI_COLS - i][0] = PallasField::zero(); } witness[4][2] = 
PallasField::zero(); assert_eq!( - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .setup() diff --git a/kimchi/src/tests/chunked.rs b/kimchi/src/tests/chunked.rs index c044054bb3..e37abf69d1 100644 --- a/kimchi/src/tests/chunked.rs +++ b/kimchi/src/tests/chunked.rs @@ -2,7 +2,7 @@ use super::framework::TestFramework; use crate::circuits::polynomials::generic::GenericGateSpec; use crate::circuits::{ gate::CircuitGate, - wires::{Wire, COLUMNS}, + wires::{Wire, KIMCHI_COLS}, }; use ark_ff::{UniformRand, Zero}; use itertools::iterate; @@ -26,7 +26,7 @@ fn test_generic_gate_with_srs_override( let mut gates_row = iterate(0, |&i| i + 1); let mut gates = Vec::with_capacity(circuit_size); - let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![Fp::zero(); circuit_size]); + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![Fp::zero(); circuit_size]); let rng = &mut rand::rngs::OsRng; @@ -76,7 +76,7 @@ fn test_generic_gate_with_srs_override( } // create and verify proof based on the witness - let framework = TestFramework::::default() + let framework = TestFramework::::default() .gates(gates) .witness(witness) .public_inputs(public); diff --git a/kimchi/src/tests/ec.rs b/kimchi/src/tests/ec.rs index 565a5e81db..2a980eb603 100644 --- a/kimchi/src/tests/ec.rs +++ b/kimchi/src/tests/ec.rs @@ -35,7 +35,7 @@ fn ec_test() { )); } - let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![]); + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![]); let rng = &mut StdRng::from_seed([0; 32]); @@ -145,7 +145,7 @@ fn ec_test() { witness[14].push(F::zero()); } - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .setup() diff --git a/kimchi/src/tests/endomul.rs b/kimchi/src/tests/endomul.rs index bdfb0e2d8e..1610c03e97 100644 --- a/kimchi/src/tests/endomul.rs +++ b/kimchi/src/tests/endomul.rs @@ -49,7 +49,7 @@ fn endomul_test() { let (endo_q, endo_r) = endos::(); - let mut witness: 
[Vec; COLUMNS] = + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![F::zero(); rows_per_scalar * num_scalars]); let rng = &mut StdRng::from_seed([0; 32]); @@ -110,7 +110,7 @@ fn endomul_test() { assert_eq!(x.into_repr(), res.n.into_repr()); } - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .setup() diff --git a/kimchi/src/tests/endomul_scalar.rs b/kimchi/src/tests/endomul_scalar.rs index 338df87e7a..97068a91ab 100644 --- a/kimchi/src/tests/endomul_scalar.rs +++ b/kimchi/src/tests/endomul_scalar.rs @@ -45,7 +45,7 @@ fn endomul_scalar_test() { let (_, endo_scalar_coeff) = endos::(); - let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![]); + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![]); let rng = &mut StdRng::from_seed([0; 32]); @@ -64,7 +64,7 @@ fn endomul_scalar_test() { ); } - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .setup() diff --git a/kimchi/src/tests/foreign_field_add.rs b/kimchi/src/tests/foreign_field_add.rs index 5f92b7d2ee..568e0c19b9 100644 --- a/kimchi/src/tests/foreign_field_add.rs +++ b/kimchi/src/tests/foreign_field_add.rs @@ -4,7 +4,7 @@ use crate::circuits::polynomials::generic::GenericGateSpec; use crate::circuits::{ constraints::ConstraintSystem, gate::{CircuitGate, CircuitGateError, Connect, GateType}, - polynomial::COLUMNS, + polynomial::KIMCHI_COLS, polynomials::{ foreign_field_add::witness::{self, FFOps}, range_check::{self, witness::extend_multi}, @@ -27,7 +27,10 @@ use o1_utils::{ foreign_field::{BigUintForeignFieldHelpers, ForeignElement, HI, LO, MI, TWO_TO_LIMB}, FieldHelpers, Two, }; -use poly_commitment::srs::{endos, SRS}; +use poly_commitment::{ + evaluation_proof::OpeningProof, + srs::{endos, SRS}, +}; use rand::{rngs::StdRng, Rng, SeedableRng}; use std::array; use std::sync::Arc; @@ -263,11 +266,11 @@ fn short_witness( inputs: &Vec, opcodes: &[FFOps], modulus: BigUint, -) -> [Vec; COLUMNS] { - let mut 
witness: [Vec; COLUMNS] = array::from_fn(|_| vec![F::zero(); 1]); +) -> [Vec; KIMCHI_COLS] { + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![F::zero(); 1]); witness[0][0] = F::one(); let add_witness = witness::create_chain::(inputs, opcodes, modulus); - for col in 0..COLUMNS { + for col in 0..KIMCHI_COLS { witness[col].extend(add_witness[col].iter()); } witness @@ -281,8 +284,8 @@ fn long_witness( inputs: &Vec, opcodes: &[FFOps], modulus: BigUint, -) -> [Vec; COLUMNS] { - let mut witness: [Vec; COLUMNS] = short_witness(inputs, opcodes, modulus); +) -> [Vec; KIMCHI_COLS] { + let mut witness: [Vec; KIMCHI_COLS] = short_witness(inputs, opcodes, modulus); let num = inputs.len() - 1; // number of chained additions @@ -313,7 +316,7 @@ fn create_test_constraint_system_ffadd( opcodes: &[FFOps], foreign_field_modulus: BigUint, full: bool, -) -> ProverIndex { +) -> ProverIndex> { let (_next_row, gates) = if full { full_circuit(opcodes, &foreign_field_modulus) } else { @@ -322,14 +325,14 @@ fn create_test_constraint_system_ffadd( let cs = ConstraintSystem::create(gates) .public(1) - .build::() + .build::() .unwrap(); let mut srs = SRS::::create(cs.domain.d1.size()); srs.add_lagrange_basis(cs.domain.d1); let srs = Arc::new(srs); let (endo_q, _endo_r) = endos::(); - ProverIndex::::create(cs, endo_q, srs) + ProverIndex::>::create(cs, endo_q, srs) } // helper to reduce lines of code in repetitive test structure @@ -338,7 +341,10 @@ fn test_ffadd( inputs: Vec, opcodes: &[FFOps], full: bool, -) -> ([Vec; COLUMNS], ProverIndex) { +) -> ( + [Vec; KIMCHI_COLS], + ProverIndex>, +) { let index = create_test_constraint_system_ffadd(opcodes, foreign_field_modulus.clone(), full); let witness = if full { @@ -351,7 +357,7 @@ fn test_ffadd( for row in 0..all_rows { assert_eq!( - index.cs.gates[row].verify_witness::( + index.cs.gates[row].verify_witness::( row, &witness, &index.cs, @@ -365,7 +371,10 @@ fn test_ffadd( } // checks that the result cells of the witness are computed 
as expected -fn check_result(witness: [Vec; COLUMNS], result: Vec>) { +fn check_result( + witness: [Vec; KIMCHI_COLS], + result: Vec>, +) { for (i, res) in result.iter().enumerate() { assert_eq!(witness[0][i + 2], res[LO]); assert_eq!(witness[1][i + 2], res[MI]); @@ -374,12 +383,12 @@ fn check_result(witness: [Vec; COLUMNS], result: Vec; COLUMNS], ovf: PallasField) { +fn check_ovf(witness: [Vec; KIMCHI_COLS], ovf: PallasField) { assert_eq!(witness[6][1], ovf); } // checks the result of the carry bits for one addition -fn check_carry(witness: [Vec; COLUMNS], carry: PallasField) { +fn check_carry(witness: [Vec; KIMCHI_COLS], carry: PallasField) { assert_eq!(witness[7][1], carry); } @@ -755,7 +764,7 @@ fn test_wrong_sum() { witness[0][12] = all_ones_limb; assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -779,7 +788,7 @@ fn test_wrong_dif() { witness[0][12] = PallasField::zero(); assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -1066,7 +1075,7 @@ fn test_bad_bound() { // Modify overflow to check first the copy constraint and then the ovf constraint witness[6][2] = -PallasField::one(); assert_eq!( - index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -1080,7 +1089,7 @@ fn test_bad_bound() { ); witness[0][0] = -PallasField::one(); assert_eq!( - index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -1091,7 +1100,7 @@ fn test_bad_bound() { witness[6][2] = PallasField::one(); witness[0][0] = PallasField::one(); assert_eq!( - index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -1120,7 +1129,7 @@ fn test_random_bad_input() { // First modify left input only to cause an invalid copy constraint witness[0][1] += PallasField::one(); assert_eq!( - index.cs.gates[1].verify_witness::( + 
index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -1135,7 +1144,7 @@ fn test_random_bad_input() { // then modify the value in the range check to cause an invalid FFAdd constraint witness[0][4] += PallasField::one(); assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -1164,7 +1173,7 @@ fn test_random_bad_parameters() { // Modify bot carry witness[7][1] += PallasField::one(); assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -1176,7 +1185,7 @@ fn test_random_bad_parameters() { // Modify overflow witness[6][1] += PallasField::one(); assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -1188,7 +1197,7 @@ fn test_random_bad_parameters() { // Modify sign index.cs.gates[1].coeffs[3] = PallasField::zero() - index.cs.gates[1].coeffs[3]; assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -1199,7 +1208,7 @@ fn test_random_bad_parameters() { index.cs.gates[1].coeffs[3] = PallasField::zero() - index.cs.gates[1].coeffs[3]; // Check back to normal assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -1266,7 +1275,7 @@ fn prove_and_verify(operation_count: usize) { // Create witness let witness = short_witness(&inputs, &operations, foreign_field_modulus); - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .public_inputs(vec![PallasField::one()]) @@ -1297,7 +1306,7 @@ fn extend_gate_bound_rc(gates: &mut Vec>) -> usize { } // Extends a witness with the final bound range check -fn extend_witness_bound_rc(witness: &mut [Vec; COLUMNS]) { +fn extend_witness_bound_rc(witness: &mut [Vec; KIMCHI_COLS]) { let bound_row = witness[0].len() - 1; let bound_lo = witness[0][bound_row]; let bound_mi = witness[1][bound_row]; @@ -1326,7 
+1335,7 @@ fn test_ffadd_no_rc() { let cs = ConstraintSystem::create(gates) .public(1) - .build::() + .build::() .unwrap(); // Create inputs @@ -1341,7 +1350,7 @@ fn test_ffadd_no_rc() { for row in 0..witness[0].len() { assert_eq!( - cs.gates[row].verify_witness::( + cs.gates[row].verify_witness::( row, &witness, &cs, @@ -1385,7 +1394,7 @@ fn test_pallas_on_pallas() { // Boilerplate for tests fn run_test( foreign_field_modulus: &BigUint, -) -> (CircuitGateResult<()>, [Vec; COLUMNS]) +) -> (CircuitGateResult<()>, [Vec; KIMCHI_COLS]) where G::BaseField: PrimeField, G: KimchiCurve, @@ -1409,13 +1418,13 @@ where let cs = ConstraintSystem::create(gates.clone()) .public(1) - .build::() + .build::() .unwrap(); // Perform witness verification that everything is ok before invalidation (quick checks) for (row, gate) in gates.iter().enumerate().take(witness[0].len()) { let result = - gate.verify_witness::(row, &witness, &cs, &witness[0][0..cs.public]); + gate.verify_witness::(row, &witness, &cs, &witness[0][0..cs.public]); if result.is_err() { return (result, witness); } @@ -1472,14 +1481,14 @@ fn test_ffadd_finalization() { // witness let witness = { // create row for the public value 1 - let mut witness: [_; COLUMNS] = array::from_fn(|_col| vec![Fp::zero(); 1]); + let mut witness: [_; KIMCHI_COLS] = array::from_fn(|_col| vec![Fp::zero(); 1]); witness[0][0] = Fp::one(); // create inputs to the addition let left = modulus.clone() - BigUint::one(); let right = modulus.clone() - BigUint::one(); // create a chain of 1 addition let add_witness = witness::create_chain::(&vec![left, right], operation, modulus); - for col in 0..COLUMNS { + for col in 0..KIMCHI_COLS { witness[col].extend(add_witness[col].iter()); } // extend range checks for all of left, right, output, and bound @@ -1498,19 +1507,19 @@ fn test_ffadd_finalization() { let cs = ConstraintSystem::create(gates.clone()) .lookup(vec![range_check::gadget::lookup_table()]) .public(num_public_inputs) - .build::() + .build::() 
.unwrap(); let mut srs = SRS::::create(cs.domain.d1.size()); srs.add_lagrange_basis(cs.domain.d1); let srs = Arc::new(srs); let (endo_q, _endo_r) = endos::(); - ProverIndex::::create(cs, endo_q, srs) + ProverIndex::>::create(cs, endo_q, srs) }; for row in 0..witness[0].len() { assert_eq!( - index.cs.gates[row].verify_witness::( + index.cs.gates[row].verify_witness::( row, &witness, &index.cs, @@ -1520,7 +1529,7 @@ fn test_ffadd_finalization() { ); } - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness.clone()) .public_inputs(vec![witness[0][0]]) diff --git a/kimchi/src/tests/foreign_field_mul.rs b/kimchi/src/tests/foreign_field_mul.rs index eb77101018..21d68baa6e 100644 --- a/kimchi/src/tests/foreign_field_mul.rs +++ b/kimchi/src/tests/foreign_field_mul.rs @@ -2,7 +2,7 @@ use crate::{ circuits::{ constraints::ConstraintSystem, gate::{CircuitGate, CircuitGateError, CircuitGateResult, Connect, GateType}, - polynomial::COLUMNS, + polynomial::KIMCHI_COLS, polynomials::foreign_field_mul, }, curve::KimchiCurve, @@ -75,11 +75,11 @@ fn run_test( right_input: &BigUint, foreign_field_modulus: &BigUint, invalidations: Vec<((usize, usize), G::ScalarField)>, -) -> (CircuitGateResult<()>, [Vec; COLUMNS]) +) -> (CircuitGateResult<()>, [Vec; KIMCHI_COLS]) where G::BaseField: PrimeField, EFqSponge: Clone + FqSponge, - EFrSponge: FrSponge, + EFrSponge: FrSponge, { // Create foreign field multiplication gates let (mut next_row, mut gates) = @@ -191,7 +191,7 @@ where let runner = if full { // Create prover index with test framework Some( - TestFramework::::default() + TestFramework::::default() .disable_gates_checks(disable_gates_checks) .gates(gates.clone()) .setup(), @@ -205,14 +205,14 @@ where } else { // If not full mode, just create constraint system (this is much faster) ConstraintSystem::create(gates.clone()) - .build::() + .build::() .unwrap() }; // Perform witness verification that everything is ok before invalidation (quick checks) for 
(row, gate) in gates.iter().enumerate().take(witness[0].len()) { let result = - gate.verify_witness::(row, &witness, &cs, &witness[0][0..cs.public]); + gate.verify_witness::(row, &witness, &cs, &witness[0][0..cs.public]); if result.is_err() { return (result, witness); } @@ -246,7 +246,7 @@ where // When targeting the plookup constraints the invalidated values would cause custom constraint // failures, so we want to suppress these witness verification checks when doing plookup tests. for (row, gate) in gates.iter().enumerate().take(witness[0].len()) { - let result = gate.verify_witness::( + let result = gate.verify_witness::( row, &witness, &cs, @@ -295,7 +295,7 @@ fn test_custom_constraints(foreign_field_m where G::BaseField: PrimeField, EFqSponge: Clone + FqSponge, - EFrSponge: FrSponge, + EFrSponge: FrSponge, { let rng = &mut StdRng::from_seed(RNG_SEED); diff --git a/kimchi/src/tests/framework.rs b/kimchi/src/tests/framework.rs index 3957ec35ff..e224d9e895 100644 --- a/kimchi/src/tests/framework.rs +++ b/kimchi/src/tests/framework.rs @@ -7,28 +7,44 @@ use crate::{ runtime_tables::{RuntimeTable, RuntimeTableCfg}, tables::LookupTable, }, - wires::COLUMNS, + wires::KIMCHI_COLS, }, curve::KimchiCurve, plonk_sponge::FrSponge, proof::{ProverProof, RecursionChallenge}, - prover_index::{testing::new_index_for_test_with_lookups, ProverIndex}, + prover_index::{ + testing::{ + new_index_for_test_with_lookups, new_index_for_test_with_lookups_and_custom_srs, + }, + ProverIndex, + }, verifier::verify, verifier_index::VerifierIndex, }; use ark_ff::PrimeField; +use ark_poly::Radix2EvaluationDomain as D; use groupmap::GroupMap; use mina_poseidon::sponge::FqSponge; use num_bigint::BigUint; -use poly_commitment::commitment::CommitmentCurve; +use poly_commitment::{ + commitment::CommitmentCurve, evaluation_proof::OpeningProof as DlogOpeningProof, OpenProof, +}; use std::{fmt::Write, time::Instant}; // aliases #[derive(Default, Clone)] -pub(crate) struct TestFramework { +pub(crate) 
struct TestFramework< + G: KimchiCurve, + const COLUMNS: usize = KIMCHI_COLS, + OpeningProof: OpenProof = DlogOpeningProof, +> where + G::BaseField: PrimeField, + OpeningProof::SRS: Clone, + VerifierIndex: Clone, +{ gates: Option>>, - witness: Option<[Vec; W]>, + witness: Option<[Vec; COLUMNS]>, public_inputs: Vec, lookup_tables: Vec>, runtime_tables_setup: Option>>, @@ -36,17 +52,29 @@ pub(crate) struct TestFramework { recursion: Vec>, num_prev_challenges: usize, disable_gates_checks: bool, - prover_index: Option>, - verifier_index: Option>, + override_srs_size: Option, + + prover_index: Option>, + verifier_index: Option>, } #[derive(Clone)] -pub(crate) struct TestRunner(TestFramework); +pub(crate) struct TestRunner< + G: KimchiCurve, + const COLUMNS: usize = KIMCHI_COLS, + OpeningProof: OpenProof = DlogOpeningProof, +>(TestFramework) +where + G::BaseField: PrimeField, + OpeningProof::SRS: Clone, + VerifierIndex: Clone; -impl TestFramework +impl> + TestFramework where G::BaseField: PrimeField, - G::ScalarField: PrimeField, + OpeningProof::SRS: Clone, + VerifierIndex: Clone, { #[must_use] pub(crate) fn gates(mut self, gates: Vec>) -> Self { @@ -55,7 +83,7 @@ where } #[must_use] - pub(crate) fn witness(mut self, witness: [Vec; W]) -> Self { + pub(crate) fn witness(mut self, witness: [Vec; COLUMNS]) -> Self { self.witness = Some(witness); self } @@ -93,21 +121,65 @@ where self } + #[must_use] + pub(crate) fn override_srs_size(mut self, size: usize) -> Self { + self.override_srs_size = Some(size); + self + } + + /// creates the indexes + #[must_use] + pub(crate) fn setup_with_custom_srs, usize) -> OpeningProof::SRS>( + mut self, + get_srs: F, + ) -> TestRunner { + let start = Instant::now(); + + let lookup_tables = std::mem::take(&mut self.lookup_tables); + let runtime_tables_setup = self.runtime_tables_setup.take(); + + let index = new_index_for_test_with_lookups_and_custom_srs( + self.gates.take().unwrap(), + self.public_inputs.len(), + self.num_prev_challenges, + 
lookup_tables, + runtime_tables_setup, + self.disable_gates_checks, + self.override_srs_size, + get_srs, + ); + println!( + "- time to create prover index: {:?}s", + start.elapsed().as_secs() + ); + + self.verifier_index = Some(index.verifier_index()); + self.prover_index = Some(index); + + TestRunner(self) + } +} + +impl TestFramework +where + G::BaseField: PrimeField, +{ /// creates the indexes #[must_use] - pub(crate) fn setup(mut self) -> TestRunner { + pub(crate) fn setup(mut self) -> TestRunner { let start = Instant::now(); let lookup_tables = std::mem::take(&mut self.lookup_tables); let runtime_tables_setup = self.runtime_tables_setup.take(); - let index = new_index_for_test_with_lookups::( + let index = new_index_for_test_with_lookups::( self.gates.take().unwrap(), self.public_inputs.len(), self.num_prev_challenges, lookup_tables, runtime_tables_setup, self.disable_gates_checks, + self.override_srs_size, ); println!( "- time to create prover index: {:?}s", @@ -121,10 +193,13 @@ where } } -impl TestRunner +impl> + TestRunner where G::ScalarField: PrimeField + Clone, G::BaseField: PrimeField + Clone, + OpeningProof::SRS: Clone, + VerifierIndex: Clone, { #[must_use] pub(crate) fn runtime_tables( @@ -142,12 +217,12 @@ where } #[must_use] - pub(crate) fn witness(mut self, witness: [Vec; W]) -> Self { + pub(crate) fn witness(mut self, witness: [Vec; COLUMNS]) -> Self { self.0.witness = Some(witness); self } - pub(crate) fn prover_index(&self) -> &ProverIndex { + pub(crate) fn prover_index(&self) -> &ProverIndex { self.0.prover_index.as_ref().unwrap() } @@ -156,7 +231,7 @@ where pub(crate) fn prove(self) -> Result<(), String> where EFqSponge: Clone + FqSponge, - EFrSponge: FrSponge, + EFrSponge: FrSponge, { let prover = self.0.prover_index.unwrap(); let witness = self.0.witness.unwrap(); @@ -187,7 +262,7 @@ where pub(crate) fn prove_and_verify(self) -> Result<(), String> where EFqSponge: Clone + FqSponge, - EFrSponge: FrSponge, + EFrSponge: FrSponge, { let prover 
= self.0.prover_index.unwrap(); let witness = self.0.witness.unwrap(); @@ -218,7 +293,7 @@ where // verify the proof (propagate any errors) let start = Instant::now(); - verify::( + verify::( &group_map, &self.0.verifier_index.unwrap(), &proof, @@ -231,7 +306,7 @@ where } } -pub fn print_witness(cols: &[Vec; COLUMNS], start_row: usize, end_row: usize) +pub fn print_witness(cols: &[Vec; KIMCHI_COLS], start_row: usize, end_row: usize) where F: PrimeField, { diff --git a/kimchi/src/tests/generic.rs b/kimchi/src/tests/generic.rs index 9546fa2db2..57f0ca78c6 100644 --- a/kimchi/src/tests/generic.rs +++ b/kimchi/src/tests/generic.rs @@ -1,6 +1,6 @@ use super::framework::TestFramework; use crate::circuits::polynomials::generic::testing::{create_circuit, fill_in_witness}; -use crate::circuits::wires::COLUMNS; +use crate::circuits::wires::KIMCHI_COLS; use ark_ff::Zero; use mina_curves::pasta::{Fp, Vesta, VestaParameters}; use mina_poseidon::{ @@ -15,14 +15,14 @@ type ScalarSponge = DefaultFrSponge; #[test] fn test_generic_gate() { - let gates = create_circuit::(0, 0); + let gates = create_circuit::(0, 0); // create witness - let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![Fp::zero(); gates.len()]); + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![Fp::zero(); gates.len()]); fill_in_witness(0, &mut witness, &[]); // create and verify proof based on the witness - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .setup() @@ -33,14 +33,14 @@ fn test_generic_gate() { #[test] fn test_generic_gate_pub() { let public = vec![Fp::from(3u8); 5]; - let gates = create_circuit::(0, public.len()); + let gates = create_circuit::(0, public.len()); // create witness - let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![Fp::zero(); gates.len()]); + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![Fp::zero(); gates.len()]); fill_in_witness(0, &mut witness, &public); // create and verify proof based on the witness 
- TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .public_inputs(public) @@ -52,14 +52,14 @@ fn test_generic_gate_pub() { #[test] fn test_generic_gate_pub_all_zeros() { let public = vec![Fp::from(0u8); 5]; - let gates = create_circuit::(0, public.len()); + let gates = create_circuit::(0, public.len()); // create witness - let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![Fp::zero(); gates.len()]); + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![Fp::zero(); gates.len()]); fill_in_witness(0, &mut witness, &public); // create and verify proof based on the witness - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .public_inputs(public) @@ -71,14 +71,14 @@ fn test_generic_gate_pub_all_zeros() { #[test] fn test_generic_gate_pub_empty() { let public = vec![]; - let gates = create_circuit::(0, public.len()); + let gates = create_circuit::(0, public.len()); // create witness - let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![Fp::zero(); gates.len()]); + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![Fp::zero(); gates.len()]); fill_in_witness(0, &mut witness, &public); // create and verify proof based on the witness - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .public_inputs(public) @@ -86,3 +86,41 @@ fn test_generic_gate_pub_empty() { .prove_and_verify::() .unwrap(); } + +#[cfg(feature = "bn254")] +#[test] +fn test_generic_gate_pairing() { + type Fp = ark_bn254::Fr; + type SpongeParams = PlonkSpongeConstantsKimchi; + type BaseSponge = DefaultFqSponge; + type ScalarSponge = DefaultFrSponge; + + use ark_ff::UniformRand; + + let public = vec![Fp::from(3u8); 5]; + let gates = create_circuit::(0, public.len()); + + let rng = &mut rand::rngs::OsRng; + let x = Fp::rand(rng); + + // create witness + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![Fp::zero(); gates.len()]); + fill_in_witness(0, &mut 
witness, &public); + + // create and verify proof based on the witness + >, + > as Default>::default() + .gates(gates) + .witness(witness) + .public_inputs(public) + .setup_with_custom_srs(|d1, usize| { + let mut srs = poly_commitment::pairing_proof::PairingSRS::create(x, usize); + srs.full_srs.add_lagrange_basis(d1); + srs + }) + .prove_and_verify::() + .unwrap(); +} diff --git a/kimchi/src/tests/keccak.rs b/kimchi/src/tests/keccak.rs index aa4847c7c0..49bede4fc6 100644 --- a/kimchi/src/tests/keccak.rs +++ b/kimchi/src/tests/keccak.rs @@ -113,7 +113,7 @@ fn test_keccak_n( where G::BaseField: PrimeField, EFqSponge: Clone + FqSponge, - EFrSponge: FrSponge, + EFrSponge: FrSponge, { let messages = vec![rng.gen_biguint_below(&BigUint::from(2u32).pow(1080)); n]; @@ -136,14 +136,14 @@ where } } - let runner: TestRunner = TestFramework::::default() + let runner: TestRunner = TestFramework::::default() .gates(gates.clone()) .setup(); let cs = runner.clone().prover_index().cs.clone(); // Perform witness verification that everything is ok before invalidation (quick checks) for (row, gate) in gates.iter().enumerate().take(witness[0].len()) { let result = - gate.verify_witness::(row, &witness, &cs, &witness[0][0..cs.public]); + gate.verify_witness::(row, &witness, &cs, &witness[0][0..cs.public]); result?; } assert_eq!( @@ -165,7 +165,7 @@ fn test_keccak( where G::BaseField: PrimeField, EFqSponge: Clone + FqSponge, - EFrSponge: FrSponge, + EFrSponge: FrSponge, { let bytelength = message.to_bytes_be().len(); @@ -192,7 +192,7 @@ where let runner = if full { // Create prover index with test framework Some( - TestFramework::::default() + TestFramework::::default() .gates(gates.clone()) .setup(), ) @@ -209,7 +209,7 @@ where // Perform witness verification that everything is ok before invalidation (quick checks) for (row, gate) in gates.iter().enumerate().take(witness[0].len()) { let result = - gate.verify_witness::(row, &witness, &cs, &witness[0][0..cs.public]); + 
gate.verify_witness::(row, &witness, &cs, &witness[0][0..cs.public]); if result.is_err() { return (result, hash); } diff --git a/kimchi/src/tests/lookup.rs b/kimchi/src/tests/lookup.rs index f54061c240..16168d229a 100644 --- a/kimchi/src/tests/lookup.rs +++ b/kimchi/src/tests/lookup.rs @@ -5,10 +5,10 @@ use crate::circuits::{ runtime_tables::{RuntimeTable, RuntimeTableCfg}, tables::LookupTable, }, - polynomial::COLUMNS, + polynomial::KIMCHI_COLS, wires::Wire, }; -use ark_ff::Zero; +use ark_ff::{UniformRand, Zero}; use mina_curves::pasta::{Fp, Vesta, VestaParameters}; use mina_poseidon::{ constants::PlonkSpongeConstantsKimchi, @@ -95,7 +95,7 @@ fn setup_lookup_proof(use_values_from_table: bool, num_lookups: usize, table_siz ] }; - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .lookup_tables(lookup_tables) @@ -126,6 +126,71 @@ fn lookup_gate_rejects_bad_lookups_multiple_tables() { setup_lookup_proof(false, 500, vec![100, 50, 50, 2, 2]) } +fn setup_successfull_runtime_table_test( + runtime_table_cfgs: Vec>, + runtime_tables: Vec>, + lookups: Vec, +) { + let mut rng = rand::thread_rng(); + let nb_lookups = lookups.len(); + + // circuit + let mut gates = vec![]; + for row in 0..nb_lookups { + gates.push(CircuitGate::new( + GateType::Lookup, + Wire::for_row(row), + vec![], + )); + } + + // witness + let witness = { + let mut cols: [_; KIMCHI_COLS] = array::from_fn(|_col| vec![Fp::zero(); gates.len()]); + + // only the first 7 registers are used in the lookup gate + let (lookup_cols, _rest) = cols.split_at_mut(7); + + for (i, table_id) in lookups.into_iter().enumerate() { + lookup_cols[0][i] = Fp::from(table_id); + let rt = runtime_table_cfgs + .clone() + .into_iter() + .find(|rt_cfg| rt_cfg.id == table_id) + .unwrap(); + let len_rt = rt.len(); + let first_column = rt.first_column; + let data = runtime_tables + .clone() + .into_iter() + .find(|rt| rt.id == table_id) + .unwrap() + .data; + + // create queries into our runtime 
lookup table. + // We will set [w1, w2], [w3, w4] and [w5, w6] to randon indexes and + // the corresponding values + let lookup_cols = &mut lookup_cols[1..]; + for chunk in lookup_cols.chunks_mut(2) { + let idx = rng.gen_range(0..len_rt); + chunk[0][i] = first_column[idx]; + chunk[1][i] = data[idx]; + } + } + cols + }; + + // run test + TestFramework::::default() + .gates(gates) + .witness(witness) + .runtime_tables_setup(runtime_table_cfgs) + .setup() + .runtime_tables(runtime_tables) + .prove_and_verify::() + .unwrap(); +} + #[test] fn test_runtime_table() { let num = 5; @@ -164,7 +229,7 @@ fn test_runtime_table() { // witness let witness = { - let mut cols: [_; COLUMNS] = array::from_fn(|_col| vec![Fp::zero(); gates.len()]); + let mut cols: [_; KIMCHI_COLS] = array::from_fn(|_col| vec![Fp::zero(); gates.len()]); // only the first 7 registers are used in the lookup gate let (lookup_cols, _rest) = cols.split_at_mut(7); @@ -189,7 +254,7 @@ fn test_runtime_table() { print_witness(&witness, 0, 20); // run test - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .runtime_tables_setup(runtime_tables_setup) @@ -225,7 +290,7 @@ fn test_negative_test_runtime_table_value_not_in_table() { // witness. The whole witness is going to be wrong. let witness = { - let mut cols: [_; COLUMNS] = array::from_fn(|_col| vec![Fp::zero(); nb_gates]); + let mut cols: [_; KIMCHI_COLS] = array::from_fn(|_col| vec![Fp::zero(); nb_gates]); // only the first 7 registers are used in the lookup gate let (lookup_cols, _rest) = cols.split_at_mut(7); @@ -245,7 +310,7 @@ fn test_negative_test_runtime_table_value_not_in_table() { }; // run prover only as the error should be raised while creating the proof. 
- let err = TestFramework::::default() + let err = TestFramework::::default() .gates(gates) .witness(witness) .runtime_tables_setup(vec![cfg]) @@ -287,7 +352,7 @@ fn test_negative_test_runtime_table_prover_with_undefined_id_in_index_and_witnes // witness let witness = { - let mut cols: [_; COLUMNS] = array::from_fn(|_col| vec![Fp::zero(); nb_gates]); + let mut cols: [_; KIMCHI_COLS] = array::from_fn(|_col| vec![Fp::zero(); nb_gates]); // only the first 7 registers are used in the lookup gate let (lookup_cols, _rest) = cols.split_at_mut(7); @@ -307,7 +372,7 @@ fn test_negative_test_runtime_table_prover_with_undefined_id_in_index_and_witnes }; // We only run the prover. No need to verify. - let err = TestFramework::::default() + let err = TestFramework::::default() .gates(gates) .witness(witness) .runtime_tables_setup(vec![cfg]) @@ -347,7 +412,7 @@ fn test_negative_test_runtime_table_prover_uses_undefined_id_in_index_and_witnes // witness let witness = { - let mut cols: [_; COLUMNS] = array::from_fn(|_col| vec![Fp::zero(); nb_gates]); + let mut cols: [_; KIMCHI_COLS] = array::from_fn(|_col| vec![Fp::zero(); nb_gates]); // only the first 7 registers are used in the lookup gate let (lookup_cols, _rest) = cols.split_at_mut(7); @@ -367,7 +432,7 @@ fn test_negative_test_runtime_table_prover_uses_undefined_id_in_index_and_witnes }; // We only run the prover. No need to verify. 
- let err = TestFramework::::default() + let err = TestFramework::::default() .gates(gates) .witness(witness) .runtime_tables_setup(vec![cfg]) @@ -381,5 +446,130 @@ fn test_negative_test_runtime_table_prover_uses_undefined_id_in_index_and_witnes ); } -// TODO: add a test with a runtime table with ID 0 (it should panic) -// See https://github.com/MinaProtocol/mina/issues/13603 +#[test] +fn test_runtime_table_with_more_than_one_runtime_table_data_given_by_prover() { + let mut rng = rand::thread_rng(); + + let first_column = [0, 1, 2, 3, 4]; + let len = first_column.len(); + + let cfg = RuntimeTableCfg { + id: 1, + first_column: first_column.into_iter().map(Into::into).collect(), + }; + + /* We want to simulate this + table ID | idx | v | v2 + 1 | 0 | 0 | 42 + 1 | 1 | 2 | 32 + 1 | 2 | 4 | 22 + 1 | 3 | 5 | 12 + 1 | 4 | 4 | 2 + */ + + let data_v: Vec = [0u32, 2, 3, 4, 5].into_iter().map(Into::into).collect(); + let data_v2: Vec = [42, 32, 22, 12, 2].into_iter().map(Into::into).collect(); + let runtime_tables: Vec> = vec![ + RuntimeTable { + id: 1, + data: data_v.clone(), + }, + RuntimeTable { + id: 1, + data: data_v2, + }, + ]; + + // circuit + let mut gates = vec![]; + for row in 0..20 { + gates.push(CircuitGate::new( + GateType::Lookup, + Wire::for_row(row), + vec![], + )); + } + + // witness + let witness = { + let mut cols: [_; KIMCHI_COLS] = array::from_fn(|_col| vec![Fp::zero(); gates.len()]); + + // only the first 7 registers are used in the lookup gate + let (lookup_cols, _rest) = cols.split_at_mut(7); + + for row in 0..20 { + // the first register is the table id. + lookup_cols[0][row] = 1.into(); + + // create queries into our runtime lookup table. 
+ // We will set [w1, w2], [w3, w4] and [w5, w6] to randon indexes and + // the corresponding values + let lookup_cols = &mut lookup_cols[1..]; + for chunk in lookup_cols.chunks_mut(2) { + let idx = rng.gen_range(0..len); + chunk[0][row] = first_column[idx].into(); + chunk[1][row] = data_v[idx]; + } + } + cols + }; + + print_witness(&witness, 0, 20); + + // run test + let err = TestFramework::::default() + .gates(gates) + .witness(witness) + .runtime_tables_setup(vec![cfg]) + .setup() + .runtime_tables(runtime_tables) + .prove_and_verify::() + .unwrap_err(); + assert_eq!( + err, + "the runtime tables provided did not match the index's configuration" + ); +} + +#[test] +fn test_runtime_table_only_one_table_with_id_zero_with_non_zero_entries_fixed_values() { + let first_column = [0, 1, 2, 3, 4, 5]; + let table_id = 0; + + let cfg = RuntimeTableCfg { + id: table_id, + first_column: first_column.into_iter().map(Into::into).collect(), + }; + + let data: Vec = [0u32, 1, 2, 3, 4, 5].into_iter().map(Into::into).collect(); + let runtime_table = RuntimeTable { id: table_id, data }; + + let lookups: Vec = [0; 20].into(); + + setup_successfull_runtime_table_test(vec![cfg], vec![runtime_table], lookups); +} + +#[test] +fn test_runtime_table_only_one_table_with_id_zero_with_non_zero_entries_random_values() { + let mut rng = rand::thread_rng(); + + let len = rng.gen_range(1usize..1000); + let first_column: Vec = (0..len as i32).collect(); + + let table_id = 0; + + let cfg = RuntimeTableCfg { + id: table_id, + first_column: first_column.clone().into_iter().map(Into::into).collect(), + }; + + let data: Vec = first_column + .into_iter() + .map(|_| UniformRand::rand(&mut rng)) + .collect(); + let runtime_table = RuntimeTable { id: table_id, data }; + + let lookups: Vec = [0; 20].into(); + + setup_successfull_runtime_table_test(vec![cfg], vec![runtime_table], lookups); +} diff --git a/kimchi/src/tests/mod.rs b/kimchi/src/tests/mod.rs index 8e86206d3b..7ac97faedb 100644 --- 
a/kimchi/src/tests/mod.rs +++ b/kimchi/src/tests/mod.rs @@ -1,4 +1,5 @@ mod and; +mod chunked; mod ec; mod endomul; mod endomul_scalar; diff --git a/kimchi/src/tests/not.rs b/kimchi/src/tests/not.rs index d1fd1065b1..0bd2835ad3 100644 --- a/kimchi/src/tests/not.rs +++ b/kimchi/src/tests/not.rs @@ -4,7 +4,7 @@ use crate::{ circuits::{ constraints::ConstraintSystem, gate::{CircuitGate, CircuitGateError, GateType}, - polynomial::COLUMNS, + polynomial::KIMCHI_COLS, polynomials::{generic::GenericGateSpec, not, xor}, wires::Wire, }, @@ -23,6 +23,7 @@ use mina_poseidon::{ }; use num_bigint::BigUint; use o1_utils::{BigUintHelpers, BitwiseOps, FieldHelpers, RandomField}; +use poly_commitment::evaluation_proof::OpeningProof; use rand::{rngs::StdRng, SeedableRng}; type PallasField = ::BaseField; @@ -45,8 +46,8 @@ const RNG_SEED: [u8; 32] = [ fn create_not_witness_unchecked_length( inputs: &[F], bits: usize, -) -> [Vec; COLUMNS] { - let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![F::zero(); 1]); +) -> [Vec; KIMCHI_COLS] { + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![F::zero(); 1]); witness[0][0] = F::from(2u8).pow([bits as u64]) - F::one(); let result = not::extend_not_witness_unchecked_length(&mut witness, inputs, bits); if let Err(e) = result { @@ -62,8 +63,8 @@ fn create_not_witness_unchecked_length( fn create_not_witness_checked_length( input: F, bits: Option, -) -> [Vec; COLUMNS] { - let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![F::zero(); 1]); +) -> [Vec; KIMCHI_COLS] { + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![F::zero(); 1]); let input_big = input.to_biguint(); let real_bits = max(input_big.bitlen(), bits.unwrap_or(0)); witness[0][0] = F::from(2u8).pow([real_bits as u64]) - F::one(); @@ -91,7 +92,7 @@ where ConstraintSystem::create(gates) .public(1) - .build::() + .build::() .unwrap() } @@ -112,7 +113,7 @@ where ConstraintSystem::create(gates) .public(1) - .build::() + .build::() .unwrap() } @@ -121,7 +122,7 
@@ fn setup_not_xor( input: Option, bits: Option, ) -> ( - [Vec; COLUMNS], + [Vec; KIMCHI_COLS], ConstraintSystem, ) where @@ -148,7 +149,7 @@ where fn test_not_xor( input: Option, bits: Option, -) -> [Vec; COLUMNS] +) -> [Vec; KIMCHI_COLS] where G::BaseField: PrimeField, { @@ -156,7 +157,7 @@ where for row in 0..witness[0].len() { assert_eq!( - cs.gates[row].verify_witness::( + cs.gates[row].verify_witness::( row, &witness, &cs, @@ -175,7 +176,7 @@ fn setup_not_gnrc( bits: usize, len: Option, ) -> ( - [Vec; COLUMNS], + [Vec; KIMCHI_COLS], ConstraintSystem, ) where @@ -208,7 +209,7 @@ fn test_not_gnrc( inputs: Option>, bits: usize, len: Option, -) -> [Vec; COLUMNS] +) -> [Vec; KIMCHI_COLS] where G::BaseField: PrimeField, { @@ -217,7 +218,7 @@ where // test public input and not generic gate for row in 0..witness[0].len() { assert_eq!( - cs.gates[row].verify_witness::( + cs.gates[row].verify_witness::( row, &witness, &cs, @@ -232,7 +233,7 @@ where // Manually checks the NOT of each crumb in the witness fn check_not_xor( - witness: &[Vec; COLUMNS], + witness: &[Vec; KIMCHI_COLS], input: G::ScalarField, bits: Option, ) { @@ -247,7 +248,7 @@ fn check_not_xor( // Manually checks the NOTs of a vector of inputs in generic gates fn check_not_gnrc( - witness: &[Vec; COLUMNS], + witness: &[Vec; KIMCHI_COLS], inputs: &[G::ScalarField], bits: usize, ) { @@ -289,7 +290,7 @@ fn test_prove_and_verify_not_xor() { let witness = create_not_witness_checked_length::(rng.gen_field_with_bits(bits), Some(bits)); - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .public_inputs(vec![ @@ -325,7 +326,7 @@ fn test_prove_and_verify_five_not_gnrc() { bits, ); - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .public_inputs(vec![ @@ -407,7 +408,12 @@ fn test_bad_not_gnrc() { // modify public input row to make sure the copy constraint fails and the generic gate also fails witness[0][0] += PallasField::one(); 
assert_eq!( - cs.gates[0].verify_witness::(0, &witness, &cs, &witness[0][0..cs.public]), + cs.gates[0].verify_witness::( + 0, + &witness, + &cs, + &witness[0][0..cs.public] + ), Err(CircuitGateError::CopyConstraint { typ: GateType::Generic, src: Wire { row: 0, col: 0 }, @@ -415,10 +421,22 @@ fn test_bad_not_gnrc() { }) ); witness[0][1] += PallasField::one(); - let index = - new_index_for_test_with_lookups(cs.gates, 1, 0, vec![xor::lookup_table()], None, false); + let index = new_index_for_test_with_lookups( + cs.gates, + 1, + 0, + vec![xor::lookup_table()], + None, + false, + None, + ); assert_eq!( - index.cs.gates[1].verify::(1, &witness, &index, &[]), + index.cs.gates[1].verify::, KIMCHI_COLS>( + 1, + &witness, + &index, + &[] + ), Err(("generic: incorrect gate").to_string()) ); } @@ -430,7 +448,12 @@ fn test_bad_not_xor() { // modify public input row to make sure the copy constraint fails and the XOR gate also fails witness[0][0] += PallasField::one(); assert_eq!( - cs.gates[0].verify_witness::(0, &witness, &cs, &witness[0][0..cs.public]), + cs.gates[0].verify_witness::( + 0, + &witness, + &cs, + &witness[0][0..cs.public] + ), Err(CircuitGateError::CopyConstraint { typ: GateType::Generic, src: Wire { row: 0, col: 0 }, @@ -440,7 +463,12 @@ fn test_bad_not_xor() { witness[1][1] += PallasField::one(); // decomposition of xor fails assert_eq!( - cs.gates[1].verify_witness::(1, &witness, &cs, &witness[0][0..cs.public]), + cs.gates[1].verify_witness::( + 1, + &witness, + &cs, + &witness[0][0..cs.public] + ), Err(CircuitGateError::Constraint(GateType::Xor16, 2)) ); // Make the second input zero with correct decomposition to make sure XOR table fails @@ -452,7 +480,7 @@ fn test_bad_not_xor() { witness[10][1] = PallasField::zero(); assert_eq!( - TestFramework::::default() + TestFramework::::default() .gates(cs.gates) .witness(witness) .setup() diff --git a/kimchi/src/tests/poseidon.rs b/kimchi/src/tests/poseidon.rs index dfaff7393f..5f8ceb56f8 100644 --- 
a/kimchi/src/tests/poseidon.rs +++ b/kimchi/src/tests/poseidon.rs @@ -3,7 +3,7 @@ use crate::{ gate::CircuitGate, polynomials, polynomials::poseidon::ROUNDS_PER_ROW, - wires::{Wire, COLUMNS}, + wires::{Wire, KIMCHI_COLS}, }, curve::KimchiCurve, tests::framework::TestFramework, @@ -62,7 +62,7 @@ fn test_poseidon() { } // witness for Poseidon permutation custom constraints - let mut witness: [Vec; COLUMNS] = + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![Fp::zero(); POS_ROWS_PER_HASH * NUM_POS + 1 /* last output row */]); // creates a random input @@ -81,7 +81,7 @@ fn test_poseidon() { ); } - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .setup() diff --git a/kimchi/src/tests/range_check.rs b/kimchi/src/tests/range_check.rs index 77a37b4144..da1edd759a 100644 --- a/kimchi/src/tests/range_check.rs +++ b/kimchi/src/tests/range_check.rs @@ -2,7 +2,7 @@ use crate::{ circuits::{ constraints::ConstraintSystem, gate::{CircuitGate, CircuitGateError, GateType}, - polynomial::COLUMNS, + polynomial::KIMCHI_COLS, polynomials::{ generic::GenericGateSpec, range_check::{self}, @@ -38,6 +38,7 @@ use mina_poseidon::{ }; use poly_commitment::{ commitment::CommitmentCurve, + evaluation_proof::OpeningProof, srs::{endos, SRS}, }; @@ -53,7 +54,10 @@ const RNG_SEED: [u8; 32] = [ 0, 33, 210, 215, 172, 130, 24, 164, 12, ]; -fn create_test_prover_index(public_size: usize, compact: bool) -> ProverIndex { +fn create_test_prover_index( + public_size: usize, + compact: bool, +) -> ProverIndex> { let (_next_row, gates) = if compact { CircuitGate::::create_compact_multi_range_check(0) } else { @@ -67,17 +71,19 @@ fn create_test_prover_index(public_size: usize, compact: bool) -> ProverIndex; COLUMNS] = array::from_fn(|_| vec![PallasField::from(0); 4]); + let witness: [Vec; KIMCHI_COLS] = + array::from_fn(|_| vec![PallasField::from(0); 4]); // gates[0] is RangeCheck0 assert_eq!( - index.cs.gates[0].verify_witness::( + 
index.cs.gates[0].verify_witness::( 0, &witness, &index.cs, @@ -88,7 +94,7 @@ fn verify_range_check0_zero_valid_witness() { // gates[1] is RangeCheck0 assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -101,11 +107,12 @@ fn verify_range_check0_zero_valid_witness() { #[test] fn verify_range_check0_one_invalid_witness() { let index = create_test_prover_index(0, false); - let witness: [Vec; COLUMNS] = array::from_fn(|_| vec![PallasField::from(1); 4]); + let witness: [Vec; KIMCHI_COLS] = + array::from_fn(|_| vec![PallasField::from(1); 4]); // gates[0] is RangeCheck0 assert_eq!( - index.cs.gates[0].verify_witness::( + index.cs.gates[0].verify_witness::( 0, &witness, &index.cs, @@ -116,7 +123,7 @@ fn verify_range_check0_one_invalid_witness() { // gates[1] is RangeCheck0 assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -141,7 +148,7 @@ fn verify_range_check0_valid_witness() { // gates[0] is RangeCheck0 assert_eq!( - index.cs.gates[0].verify_witness::( + index.cs.gates[0].verify_witness::( 0, &witness, &index.cs, @@ -152,7 +159,7 @@ fn verify_range_check0_valid_witness() { // gates[1] is RangeCheck0 assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -172,7 +179,7 @@ fn verify_range_check0_valid_witness() { // gates[0] is RangeCheck0 assert_eq!( - index.cs.gates[0].verify_witness::( + index.cs.gates[0].verify_witness::( 0, &witness, &index.cs, @@ -183,7 +190,7 @@ fn verify_range_check0_valid_witness() { // gates[1] is RangeCheck0 assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -211,7 +218,7 @@ fn verify_range_check0_invalid_witness() { // gates[0] is RangeCheck0 assert_eq!( - index.cs.gates[0].verify_witness::( + index.cs.gates[0].verify_witness::( 0, &witness, &index.cs, @@ -229,7 +236,7 @@ fn 
verify_range_check0_invalid_witness() { // gates[1] is RangeCheck0 assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -256,7 +263,7 @@ fn verify_range_check0_invalid_witness() { // gates[0] is RangeCheck0 assert_eq!( - index.cs.gates[0].verify_witness::( + index.cs.gates[0].verify_witness::( 0, &witness, &index.cs, @@ -270,7 +277,7 @@ fn verify_range_check0_invalid_witness() { // gates[1] is RangeCheck0 assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -292,7 +299,7 @@ fn verify_range_check0_valid_v0_in_range() { // gates[0] is RangeCheck0 and contains v0 assert_eq!( - index.cs.gates[0].verify_witness::( + index.cs.gates[0].verify_witness::( 0, &witness, &index.cs, @@ -309,7 +316,7 @@ fn verify_range_check0_valid_v0_in_range() { // gates[0] is RangeCheck0 and contains v0 assert_eq!( - index.cs.gates[0].verify_witness::( + index.cs.gates[0].verify_witness::( 0, &witness, &index.cs, @@ -326,7 +333,7 @@ fn verify_range_check0_valid_v0_in_range() { // gates[0] is RangeCheck0 and contains v0 assert_eq!( - index.cs.gates[0].verify_witness::( + index.cs.gates[0].verify_witness::( 0, &witness, &index.cs, @@ -343,7 +350,7 @@ fn verify_range_check0_valid_v0_in_range() { // gates[0] is RangeCheck0 and contains v0 assert_eq!( - index.cs.gates[0].verify_witness::( + index.cs.gates[0].verify_witness::( 0, &witness, &index.cs, @@ -365,7 +372,7 @@ fn verify_range_check0_valid_v1_in_range() { // gates[1] is RangeCheck0 and contains v1 assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -382,7 +389,7 @@ fn verify_range_check0_valid_v1_in_range() { // gates[1] is RangeCheck0 and contains v1 assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -399,7 +406,7 @@ fn verify_range_check0_valid_v1_in_range() { // gates[1] is RangeCheck0 and 
contains v1 assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -416,7 +423,7 @@ fn verify_range_check0_valid_v1_in_range() { // gates[1] is RangeCheck0 and contains v1 assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -438,7 +445,7 @@ fn verify_range_check0_invalid_v0_not_in_range() { // gates[0] is RangeCheck0 and contains v0 assert_eq!( - index.cs.gates[0].verify_witness::( + index.cs.gates[0].verify_witness::( 0, &witness, &index.cs, @@ -455,7 +462,7 @@ fn verify_range_check0_invalid_v0_not_in_range() { // gates[0] is RangeCheck0 and contains v0 assert_eq!( - index.cs.gates[0].verify_witness::( + index.cs.gates[0].verify_witness::( 0, &witness, &index.cs, @@ -477,7 +484,7 @@ fn verify_range_check0_invalid_v1_not_in_range() { // gates[1] is RangeCheck0 and contains v1 assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -494,7 +501,7 @@ fn verify_range_check0_invalid_v1_not_in_range() { // gates[1] is RangeCheck0 and contains v1 assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -519,7 +526,7 @@ fn verify_range_check0_test_copy_constraints() { // Positive test case (gates[row] is a RangeCheck0 circuit gate) assert_eq!( - index.cs.gates[row].verify_witness::( + index.cs.gates[row].verify_witness::( row, &witness, &index.cs, @@ -532,7 +539,7 @@ fn verify_range_check0_test_copy_constraints() { assert_ne!(witness[col][row], PallasField::zero()); witness[col][row] = PallasField::zero(); assert_eq!( - index.cs.gates[row].verify_witness::( + index.cs.gates[row].verify_witness::( row, &witness, &index.cs, @@ -564,7 +571,7 @@ fn verify_range_check0_v0_test_lookups() { // Positive test // gates[0] is RangeCheck0 and constrains some of v0 assert_eq!( - index.cs.gates[0].verify_witness::( + 
index.cs.gates[0].verify_witness::( 0, &witness, &index.cs, @@ -573,7 +580,7 @@ fn verify_range_check0_v0_test_lookups() { Ok(()) ); - let test_runner = TestFramework::::default() + let test_runner = TestFramework::::default() .gates(index.cs.gates) .setup(); @@ -617,7 +624,7 @@ fn verify_range_check0_v1_test_lookups() { // Positive test // gates[1] is RangeCheck0 and constrains some of v1 assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -626,7 +633,7 @@ fn verify_range_check0_v1_test_lookups() { Ok(()) ); - let test_runner = TestFramework::::default() + let test_runner = TestFramework::::default() .gates(index.cs.gates) .setup(); @@ -660,11 +667,12 @@ fn verify_range_check0_v1_test_lookups() { #[test] fn verify_range_check1_zero_valid_witness() { let index = create_test_prover_index(0, false); - let witness: [Vec; COLUMNS] = array::from_fn(|_| vec![PallasField::from(0); 4]); + let witness: [Vec; KIMCHI_COLS] = + array::from_fn(|_| vec![PallasField::from(0); 4]); // gates[2] is RangeCheck1 assert_eq!( - index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -677,11 +685,12 @@ fn verify_range_check1_zero_valid_witness() { #[test] fn verify_range_check1_one_invalid_witness() { let index = create_test_prover_index(0, false); - let witness: [Vec; COLUMNS] = array::from_fn(|_| vec![PallasField::from(1); 4]); + let witness: [Vec; KIMCHI_COLS] = + array::from_fn(|_| vec![PallasField::from(1); 4]); // gates[2] is RangeCheck1 assert_eq!( - index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -706,7 +715,7 @@ fn verify_range_check1_valid_witness() { // gates[2] is RangeCheck1 assert_eq!( - index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -726,7 +735,7 @@ fn verify_range_check1_valid_witness() { // gates[2] is RangeCheck1 assert_eq!( - 
index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -754,7 +763,7 @@ fn verify_range_check1_invalid_witness() { // gates[2] is RangeCheck1 assert_eq!( - index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -777,7 +786,7 @@ fn verify_range_check1_invalid_witness() { // gates[2] is RangeCheck1 assert_eq!( - index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -799,7 +808,7 @@ fn verify_range_check1_valid_v2_in_range() { // gates[2] is RangeCheck1 and constrains v2 assert_eq!( - index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -816,7 +825,7 @@ fn verify_range_check1_valid_v2_in_range() { // gates[2] is RangeCheck1 and constrains v2 assert_eq!( - index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -833,7 +842,7 @@ fn verify_range_check1_valid_v2_in_range() { // gates[2] is RangeCheck1 and constrains v2 assert_eq!( - index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -850,7 +859,7 @@ fn verify_range_check1_valid_v2_in_range() { // gates[2] is RangeCheck1 and constrains v2 assert_eq!( - index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -872,7 +881,7 @@ fn verify_range_check1_invalid_v2_not_in_range() { // gates[2] is RangeCheck1 and constrains v2 assert_eq!( - index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -889,7 +898,7 @@ fn verify_range_check1_invalid_v2_not_in_range() { // gates[2] is RangeCheck1 and constrains v2 assert_eq!( - index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -914,7 +923,7 @@ fn verify_range_check1_test_copy_constraints() { // Positive test case (gates[2] is a RangeCheck1 circuit gate) assert_eq!( - 
index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -929,7 +938,7 @@ fn verify_range_check1_test_copy_constraints() { // RangeCheck1's current row doesn't have any copy constraints assert_eq!( - index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -940,7 +949,7 @@ fn verify_range_check1_test_copy_constraints() { // RangeCheck1's next row has copy constraints, but it's a Zero gate assert_eq!( - index.cs.gates[3].verify_witness::( + index.cs.gates[3].verify_witness::( 3, &witness, &index.cs, @@ -971,7 +980,7 @@ fn verify_range_check1_test_curr_row_lookups() { // Positive test // gates[2] is RangeCheck1 and constrains v2 assert_eq!( - index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -980,7 +989,7 @@ fn verify_range_check1_test_curr_row_lookups() { Ok(()) ); - let test_runner = TestFramework::::default() + let test_runner = TestFramework::::default() .gates(index.cs.gates) .setup(); @@ -1020,7 +1029,7 @@ fn verify_range_check1_test_next_row_lookups() { // Positive test case (gates[2] is RangeCheck1 and constrains // both v0's and v1's lookups that are deferred to 4th row) assert_eq!( - index.cs.gates[2].verify_witness::( + index.cs.gates[2].verify_witness::( 2, &witness, &index.cs, @@ -1029,7 +1038,7 @@ fn verify_range_check1_test_next_row_lookups() { Ok(()) ); - let test_runner = TestFramework::::default() + let test_runner = TestFramework::::default() .gates(index.cs.gates) .setup(); @@ -1082,7 +1091,7 @@ fn verify_64_bit_range_check() { // Create constraint system let cs = ConstraintSystem::::create(gates /*, mina_poseidon::pasta::fp_kimchi::params()*/) - .build::() + .build::() .unwrap(); let index = { @@ -1091,14 +1100,15 @@ fn verify_64_bit_range_check() { let srs = Arc::new(srs); let (endo_q, _endo_r) = endos::(); - ProverIndex::::create(cs, endo_q, srs) + ProverIndex::>::create(cs, endo_q, srs) }; // Witness layout 
(positive test case) // Row 0 1 2 3 ... 14 Gate // 0 0 0 0 0 ... 0 GenericPub // 1 0 0 X X ... X RangeCheck0 - let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![PallasField::zero()]); + let mut witness: [Vec; KIMCHI_COLS] = + array::from_fn(|_| vec![PallasField::zero()]); range_check::witness::create::( PallasField::from(2u64).pow([64]) - PallasField::one(), // in range ) @@ -1108,7 +1118,7 @@ fn verify_64_bit_range_check() { // Positive test case assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -1121,7 +1131,8 @@ fn verify_64_bit_range_check() { // Row 0 1 2 3 ... 14 Gate // 0 0 0 0 0 ... 0 GenericPub // 1 0 X X X ... X RangeCheck0 - let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![PallasField::zero()]); + let mut witness: [Vec; KIMCHI_COLS] = + array::from_fn(|_| vec![PallasField::zero()]); range_check::witness::create::( PallasField::from(2u64).pow([64]), // out of range ) @@ -1131,7 +1142,7 @@ fn verify_64_bit_range_check() { // Negative test case assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -1163,7 +1174,7 @@ fn compact_multi_range_check() { // Positive test assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -1177,7 +1188,7 @@ fn compact_multi_range_check() { // Negative test assert_eq!( - index.cs.gates[1].verify_witness::( + index.cs.gates[1].verify_witness::( 1, &witness, &index.cs, @@ -1217,7 +1228,7 @@ fn verify_range_check_valid_proof1() { let verifier_index = prover_index.verifier_index(); // Verify proof - let res = verify::( + let res = verify::, KIMCHI_COLS>( &group_map, &verifier_index, &proof, @@ -1240,7 +1251,7 @@ fn verify_compact_multi_range_check_proof() { let (_next_row, gates) = CircuitGate::::create_compact_multi_range_check(0); - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .setup() 
diff --git a/kimchi/src/tests/recursion.rs b/kimchi/src/tests/recursion.rs index 51819c8d4a..1aff106d8c 100644 --- a/kimchi/src/tests/recursion.rs +++ b/kimchi/src/tests/recursion.rs @@ -1,6 +1,6 @@ use super::framework::TestFramework; use crate::circuits::polynomials::generic::testing::{create_circuit, fill_in_witness}; -use crate::circuits::wires::COLUMNS; +use crate::circuits::wires::KIMCHI_COLS; use crate::proof::RecursionChallenge; use ark_ff::{UniformRand, Zero}; use ark_poly::univariate::DensePolynomial; @@ -11,7 +11,7 @@ use mina_poseidon::{ sponge::{DefaultFqSponge, DefaultFrSponge}, }; use o1_utils::math; -use poly_commitment::commitment::b_poly_coefficients; +use poly_commitment::{commitment::b_poly_coefficients, SRS as _}; use rand::prelude::*; use std::array; @@ -21,14 +21,14 @@ type ScalarSponge = DefaultFrSponge; #[test] fn test_recursion() { - let gates = create_circuit::(0, 0); + let gates = create_circuit::(0, 0); // create witness - let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![Fp::zero(); gates.len()]); + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![Fp::zero(); gates.len()]); fill_in_witness(0, &mut witness, &[]); // setup - let test_runner = TestFramework::::default() + let test_runner = TestFramework::::default() .num_prev_challenges(1) .gates(gates) .witness(witness) @@ -43,7 +43,7 @@ fn test_recursion() { let comm = { let coeffs = b_poly_coefficients(&chals); let b = DensePolynomial::from_coefficients_vec(coeffs); - index.srs.commit_non_hiding(&b, None) + index.srs.commit_non_hiding(&b, 1, None) }; RecursionChallenge::new(chals, comm) }; diff --git a/kimchi/src/tests/rot.rs b/kimchi/src/tests/rot.rs index be823c2177..09b87108ef 100644 --- a/kimchi/src/tests/rot.rs +++ b/kimchi/src/tests/rot.rs @@ -5,7 +5,7 @@ use crate::{ circuits::{ constraints::ConstraintSystem, gate::{CircuitGate, CircuitGateError, Connect, GateType}, - polynomial::COLUMNS, + polynomial::KIMCHI_COLS, polynomials::{ generic::GenericGateSpec, 
keccak::{DIM, OFF}, @@ -27,7 +27,10 @@ use mina_poseidon::{ FqSponge, }; use o1_utils::Two; -use poly_commitment::srs::{endos, SRS}; +use poly_commitment::{ + evaluation_proof::OpeningProof, + srs::{endos, SRS}, +}; use rand::{rngs::StdRng, Rng, SeedableRng}; type PallasField = ::BaseField; @@ -63,12 +66,12 @@ fn create_rot_witness( word: u64, rot: u32, side: RotMode, -) -> [Vec; COLUMNS] +) -> [Vec; KIMCHI_COLS] where G::BaseField: PrimeField, { // Include the zero row - let mut witness: [Vec; COLUMNS] = + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![G::ScalarField::zero()]); rot::extend_rot(&mut witness, word, rot, side); witness @@ -84,7 +87,9 @@ where // gate for the zero value let gates = create_rot_gadget::(rot, side); - ConstraintSystem::create(gates).build::().unwrap() + ConstraintSystem::create(gates) + .build::() + .unwrap() } // Function to create a prover and verifier to test the ROT circuit @@ -92,7 +97,7 @@ fn prove_and_verify() where G::BaseField: PrimeField, EFqSponge: Clone + FqSponge, - EFrSponge: FrSponge, + EFrSponge: FrSponge, { let rng = &mut StdRng::from_seed(RNG_SEED); let rot = rng.gen_range(1..64); @@ -105,7 +110,7 @@ where // Create witness let witness = create_rot_witness::(word, rot, RotMode::Left); - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .setup() @@ -127,7 +132,7 @@ where let (witness, cs) = setup_rot::(word, rot, side); for row in 0..=2 { assert_eq!( - cs.gates[row].verify_witness::( + cs.gates[row].verify_witness::( row, &witness, &cs, @@ -144,7 +149,7 @@ fn setup_rot( rot: u32, side: RotMode, ) -> ( - [Vec; COLUMNS], + [Vec; KIMCHI_COLS], ConstraintSystem, ) where @@ -175,22 +180,20 @@ fn test_rot_random() { test_rot::(word, rot, RotMode::Right); } -#[should_panic] #[test] // Test that a bad rotation fails as expected fn test_zero_rot() { let rng = &mut StdRng::from_seed(RNG_SEED); let word = rng.gen_range(0..2u128.pow(64)) as u64; - create_rot_witness::(word, 0, 
RotMode::Left); + test_rot::(word, 0, RotMode::Left); } -#[should_panic] #[test] // Test that a bad rotation fails as expected fn test_large_rot() { let rng = &mut StdRng::from_seed(RNG_SEED); let word = rng.gen_range(0..2u128.pow(64)) as u64; - create_rot_witness::(word, 64, RotMode::Left); + test_rot::(word, 64, RotMode::Left); } #[test] @@ -207,7 +210,7 @@ fn test_bad_constraints() { witness[i + 7][1] += PallasField::from(4u32); // Decomposition constraint fails assert_eq!( - cs.gates[1].verify_witness::( + cs.gates[1].verify_witness::( 1, &witness, &cs, @@ -224,7 +227,12 @@ fn test_bad_constraints() { witness[0][1] += PallasField::one(); // Decomposition constraint fails assert_eq!( - cs.gates[1].verify_witness::(1, &witness, &cs, &witness[0][0..cs.public]), + cs.gates[1].verify_witness::( + 1, + &witness, + &cs, + &witness[0][0..cs.public] + ), Err(CircuitGateError::Constraint(GateType::Rot64, 9)) ); // undo @@ -235,7 +243,12 @@ fn test_bad_constraints() { witness[1][1] += PallasField::one(); // Rotated word is wrong assert_eq!( - cs.gates[1].verify_witness::(1, &witness, &cs, &witness[0][0..cs.public]), + cs.gates[1].verify_witness::( + 1, + &witness, + &cs, + &witness[0][0..cs.public] + ), Err(CircuitGateError::Constraint(GateType::Rot64, 10)) ); // undo @@ -248,7 +261,7 @@ fn test_bad_constraints() { witness[i + 3][1] += PallasField::one(); // Bound constraint fails assert_eq!( - cs.gates[1].verify_witness::( + cs.gates[1].verify_witness::( 1, &witness, &cs, @@ -264,11 +277,21 @@ fn test_bad_constraints() { witness[2][1] += PallasField::one(); witness[0][3] += PallasField::one(); assert_eq!( - cs.gates[1].verify_witness::(1, &witness, &cs, &witness[0][0..cs.public]), + cs.gates[1].verify_witness::( + 1, + &witness, + &cs, + &witness[0][0..cs.public] + ), Err(CircuitGateError::Constraint(GateType::Rot64, 9)) ); assert_eq!( - cs.gates[3].verify_witness::(3, &witness, &cs, &witness[0][0..cs.public]), + cs.gates[3].verify_witness::( + 3, + &witness, + &cs, + 
&witness[0][0..cs.public] + ), Err(CircuitGateError::Constraint(GateType::RangeCheck0, 9)) ); witness[2][1] -= PallasField::one(); @@ -277,11 +300,21 @@ fn test_bad_constraints() { // modify shifted witness[0][2] += PallasField::one(); assert_eq!( - cs.gates[1].verify_witness::(1, &witness, &cs, &witness[0][0..cs.public]), + cs.gates[1].verify_witness::( + 1, + &witness, + &cs, + &witness[0][0..cs.public] + ), Err(CircuitGateError::Constraint(GateType::Rot64, 9)) ); assert_eq!( - cs.gates[2].verify_witness::(2, &witness, &cs, &witness[0][0..cs.public]), + cs.gates[2].verify_witness::( + 2, + &witness, + &cs, + &witness[0][0..cs.public] + ), Err(CircuitGateError::Constraint(GateType::RangeCheck0, 9)) ); witness[0][2] -= PallasField::one(); @@ -289,14 +322,24 @@ fn test_bad_constraints() { // modify value of shifted to be more than 64 bits witness[0][2] += PallasField::two_pow(64); assert_eq!( - cs.gates[2].verify_witness::(2, &witness, &cs, &witness[0][0..cs.public]), + cs.gates[2].verify_witness::( + 2, + &witness, + &cs, + &witness[0][0..cs.public] + ), Err(CircuitGateError::Constraint(GateType::RangeCheck0, 9)) ); // Update decomposition witness[2][2] += PallasField::one(); // Make sure the 64-bit check fails assert_eq!( - cs.gates[2].verify_witness::(2, &witness, &cs, &witness[0][0..cs.public]), + cs.gates[2].verify_witness::( + 2, + &witness, + &cs, + &witness[0][0..cs.public] + ), Err(CircuitGateError::CopyConstraint { typ: GateType::RangeCheck0, src: Wire { row: 2, col: 2 }, @@ -310,14 +353,24 @@ fn test_bad_constraints() { witness[0][3] += PallasField::two_pow(64); witness[2][1] += PallasField::two_pow(64); assert_eq!( - cs.gates[3].verify_witness::(3, &witness, &cs, &witness[0][0..cs.public]), + cs.gates[3].verify_witness::( + 3, + &witness, + &cs, + &witness[0][0..cs.public] + ), Err(CircuitGateError::Constraint(GateType::RangeCheck0, 9)) ); // Update decomposition witness[2][3] += PallasField::one(); // Make sure the 64-bit check fails assert_eq!( - 
cs.gates[3].verify_witness::(3, &witness, &cs, &witness[0][0..cs.public]), + cs.gates[3].verify_witness::( + 3, + &witness, + &cs, + &witness[0][0..cs.public] + ), Err(CircuitGateError::CopyConstraint { typ: GateType::RangeCheck0, src: Wire { row: 3, col: 2 }, @@ -356,7 +409,7 @@ fn test_rot_finalization() { // witness let witness = { // create one row for the public word - let mut cols: [_; COLUMNS] = array::from_fn(|_col| vec![Fp::zero(); 2]); + let mut cols: [_; KIMCHI_COLS] = array::from_fn(|_col| vec![Fp::zero(); 2]); // initialize the public input containing the word to be rotated let input = 0xDC811727DAF22EC1u64; @@ -370,19 +423,19 @@ fn test_rot_finalization() { let cs = ConstraintSystem::create(gates.clone()) .public(num_public_inputs) .lookup(vec![rot::lookup_table()]) - .build::() + .build::() .unwrap(); let mut srs = SRS::::create(cs.domain.d1.size()); srs.add_lagrange_basis(cs.domain.d1); let srs = Arc::new(srs); let (endo_q, _endo_r) = endos::(); - ProverIndex::::create(cs, endo_q, srs) + ProverIndex::>::create(cs, endo_q, srs) }; for row in 0..witness[0].len() { assert_eq!( - index.cs.gates[row].verify_witness::( + index.cs.gates[row].verify_witness::( row, &witness, &index.cs, @@ -392,7 +445,7 @@ fn test_rot_finalization() { ); } - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness.clone()) .public_inputs(vec![witness[0][0], witness[0][1]]) @@ -425,12 +478,15 @@ fn test_keccak_table() { gates.connect_64bit(zero_row, rot_row - 1); } } - let cs = ConstraintSystem::create(gates).build::().unwrap(); + let cs = ConstraintSystem::create(gates) + .build::() + .unwrap(); let state: [[u64; DIM]; DIM] = array::from_fn(|_| { array::from_fn(|_| rand::thread_rng().gen_range(0..2u128.pow(64)) as u64) }); - let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![PallasField::zero()]); + let mut witness: [Vec; KIMCHI_COLS] = + array::from_fn(|_| vec![PallasField::zero()]); for (y, col) in OFF.iter().enumerate() { for (x, &rot) 
in col.iter().enumerate() { if rot == 0 { @@ -442,7 +498,7 @@ fn test_keccak_table() { for row in 0..=48 { assert_eq!( - cs.gates[row].verify_witness::( + cs.gates[row].verify_witness::( row, &witness, &cs, diff --git a/kimchi/src/tests/serde.rs b/kimchi/src/tests/serde.rs index 6c6501b7ca..50ecebb72a 100644 --- a/kimchi/src/tests/serde.rs +++ b/kimchi/src/tests/serde.rs @@ -2,7 +2,7 @@ use crate::{ bench::BenchmarkCtx, circuits::{ polynomials::generic::testing::{create_circuit, fill_in_witness}, - wires::COLUMNS, + wires::KIMCHI_COLS, }, proof::ProverProof, prover_index::testing::new_index_for_test, @@ -17,7 +17,7 @@ use mina_poseidon::{ constants::PlonkSpongeConstantsKimchi, sponge::{DefaultFqSponge, DefaultFrSponge}, }; -use poly_commitment::{commitment::CommitmentCurve, srs::SRS}; +use poly_commitment::{commitment::CommitmentCurve, evaluation_proof::OpeningProof, srs::SRS}; use std::array; use std::time::Instant; @@ -41,7 +41,8 @@ mod tests { println!("proof size: {} bytes", ser_pf.len()); // deserialize the proof - let de_pf: ProverProof = rmp_serde::from_slice(&ser_pf).unwrap(); + let de_pf: ProverProof> = + rmp_serde::from_slice(&ser_pf).unwrap(); // verify the deserialized proof (must accept the proof) ctx.batch_verification(&vec![(de_pf, public_input)]); @@ -50,10 +51,10 @@ mod tests { #[test] pub fn test_serialization() { let public = vec![Fp::from(3u8); 5]; - let gates = create_circuit::(0, public.len()); + let gates = create_circuit::(0, public.len()); // create witness - let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![Fp::zero(); gates.len()]); + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![Fp::zero(); gates.len()]); fill_in_witness(0, &mut witness, &public); let index = new_index_for_test(gates, public.len()); @@ -72,7 +73,7 @@ mod tests { .unwrap(); // deserialize the verifier index - let mut verifier_index_deserialize: VerifierIndex> = + let mut verifier_index_deserialize: VerifierIndex, _> = 
serde_json::from_str(&verifier_index_serialize).unwrap(); // add srs with lagrange bases @@ -80,10 +81,11 @@ mod tests { srs.add_lagrange_basis(verifier_index.domain); verifier_index_deserialize.powers_of_alpha = index.powers_of_alpha; verifier_index_deserialize.linearization = index.linearization; + verifier_index_deserialize.srs = std::sync::Arc::new(srs); // verify the proof let start = Instant::now(); - verify::( + verify::, KIMCHI_COLS>( &group_map, &verifier_index_deserialize, &proof, diff --git a/kimchi/src/tests/turshi.rs b/kimchi/src/tests/turshi.rs index 971bc86a71..aed7e850c1 100644 --- a/kimchi/src/tests/turshi.rs +++ b/kimchi/src/tests/turshi.rs @@ -1,6 +1,6 @@ use crate::circuits::{ gate::CircuitGate, - polynomial::COLUMNS, + polynomial::KIMCHI_COLS, polynomials::turshi::{testing::*, witness::*}, }; use mina_curves::pasta::Fp as F; @@ -22,7 +22,7 @@ fn test_cairo_should_fail() { let inirow = 0; let (circuit, _) = CircuitGate::::create_cairo_gadget(inirow, ninstr); - let mut witness = cairo_witness::(&prog); + let mut witness = cairo_witness::(&prog); // break a witness witness[0][0] += F::from(1u32); let res_ensure = ensure_cairo_gate(&circuit[0], 0, &witness); @@ -68,7 +68,7 @@ fn test_cairo_gate() { mem.write(F::from(23u32), F::from(44u32)); //end of program let prog = CairoProgram::new(&mut mem, 5); - let witness = cairo_witness::(&prog); + let witness = cairo_witness::(&prog); // Create the Cairo circuit let ninstr = prog.trace().len(); diff --git a/kimchi/src/tests/varbasemul.rs b/kimchi/src/tests/varbasemul.rs index df46e12f7d..160f374766 100644 --- a/kimchi/src/tests/varbasemul.rs +++ b/kimchi/src/tests/varbasemul.rs @@ -46,7 +46,7 @@ fn varbase_mul_test() { )); } - let mut witness: [Vec; COLUMNS] = + let mut witness: [Vec; KIMCHI_COLS] = array::from_fn(|_| vec![F::zero(); rows_per_scalar * num_scalars]); let rng = &mut StdRng::from_seed([0; 32]); @@ -89,7 +89,7 @@ fn varbase_mul_test() { start.elapsed() ); - TestFramework::::default() + 
TestFramework::::default() .gates(gates) .witness(witness) .setup() diff --git a/kimchi/src/tests/xor.rs b/kimchi/src/tests/xor.rs index b615451e75..994f92e22c 100644 --- a/kimchi/src/tests/xor.rs +++ b/kimchi/src/tests/xor.rs @@ -4,7 +4,7 @@ use crate::{ circuits::{ constraints::ConstraintSystem, gate::{CircuitGate, CircuitGateError, Connect, GateType}, - polynomial::COLUMNS, + polynomial::KIMCHI_COLS, polynomials::{generic::GenericGateSpec, xor}, wires::Wire, }, @@ -21,7 +21,10 @@ use mina_poseidon::{ }; use num_bigint::BigUint; use o1_utils::{BigUintHelpers, BitwiseOps, FieldHelpers, RandomField}; -use poly_commitment::srs::{endos, SRS}; +use poly_commitment::{ + evaluation_proof::OpeningProof, + srs::{endos, SRS}, +}; use rand::{rngs::StdRng, SeedableRng}; use super::framework::TestFramework; @@ -50,7 +53,9 @@ where let mut gates = vec![]; let _next_row = CircuitGate::::extend_xor_gadget(&mut gates, bits); - ConstraintSystem::create(gates).build::().unwrap() + ConstraintSystem::create(gates) + .build::() + .unwrap() } // Returns the all ones BigUint of bits length @@ -65,7 +70,7 @@ pub(crate) fn xor_nybble(word: BigUint, nybble: usize) -> BigUint { // Manually checks the XOR of each nybble in the witness pub(crate) fn check_xor( - witness: &[Vec; COLUMNS], + witness: &[Vec; KIMCHI_COLS], bits: usize, input1: G::ScalarField, input2: G::ScalarField, @@ -102,7 +107,7 @@ fn setup_xor( bits: Option, ) -> ( ConstraintSystem, - [Vec; COLUMNS], + [Vec; KIMCHI_COLS], ) where G::BaseField: PrimeField, @@ -133,14 +138,14 @@ fn test_xor( in1: Option, in2: Option, bits: Option, -) -> [Vec; COLUMNS] +) -> [Vec; KIMCHI_COLS] where G::BaseField: PrimeField, { let (cs, witness) = setup_xor::(in1, in2, bits); for row in 0..witness[0].len() { assert_eq!( - cs.gates[row].verify_witness::( + cs.gates[row].verify_witness::( row, &witness, &cs, @@ -168,7 +173,7 @@ fn test_prove_and_verify_xor() { // Create witness and random inputs let witness = xor::create_xor_witness(input1, 
input2, bits); - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .setup() @@ -243,26 +248,31 @@ fn test_xor128_random() { } fn verify_bad_xor_decomposition( - witness: &mut [Vec; COLUMNS], + witness: &mut [Vec; KIMCHI_COLS], cs: ConstraintSystem, ) where G::BaseField: PrimeField, { // modify by one each of the witness cells individually - for col in 0..COLUMNS { + for col in 0..KIMCHI_COLS { // first three columns make fail the ith+1 constraint // for the rest, the first 4 make the 1st fail, the following 4 make the 2nd fail, the last 4 make the 3rd fail let bad = if col < 3 { col + 1 } else { (col - 3) / 4 + 1 }; witness[col][0] += G::ScalarField::one(); assert_eq!( - cs.gates[0].verify_witness::(0, witness, &cs, &witness[0][0..cs.public]), + cs.gates[0].verify_witness::( + 0, + witness, + &cs, + &witness[0][0..cs.public] + ), Err(CircuitGateError::Constraint(GateType::Xor16, bad)) ); witness[col][0] -= G::ScalarField::one(); } // undo changes assert_eq!( - cs.gates[0].verify_witness::(0, witness, &cs, &witness[0][0..cs.public]), + cs.gates[0].verify_witness::(0, witness, &cs, &witness[0][0..cs.public]), Ok(()) ); } @@ -304,17 +314,17 @@ fn test_extend_xor() { let cs = ConstraintSystem::create(gates) .public(2) - .build::() + .build::() .unwrap(); - let mut witness: [_; COLUMNS] = array::from_fn(|_col| vec![Fp::zero(); 2]); + let mut witness: [_; KIMCHI_COLS] = array::from_fn(|_col| vec![Fp::zero(); 2]); witness[0][0] = input1; witness[0][1] = input2; - xor::extend_xor_witness::(&mut witness, input1, input2, bits); + xor::extend_xor_witness::(&mut witness, input1, input2, bits); for row in 0..witness[0].len() { assert_eq!( - cs.gates[row].verify_witness::( + cs.gates[row].verify_witness::( row, &witness, &cs, @@ -347,11 +357,11 @@ fn test_bad_xor() { // modify the output to be all zero witness[2][0] = PallasField::zero(); for i in 1..=4 { - witness[COLUMNS - i][0] = PallasField::zero(); + witness[KIMCHI_COLS - i][0] = 
PallasField::zero(); } assert_eq!( - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness) .setup() @@ -388,7 +398,7 @@ fn test_xor_finalization() { // witness let witness = { - let mut cols: [_; COLUMNS] = array::from_fn(|_col| vec![Fp::zero(); num_inputs]); + let mut cols: [_; KIMCHI_COLS] = array::from_fn(|_col| vec![Fp::zero(); num_inputs]); // initialize the 2 inputs let input1 = 0xDC811727DAF22EC15927D6AA275F406Bu128.into(); @@ -396,7 +406,7 @@ fn test_xor_finalization() { cols[0][0] = input1; cols[0][1] = input2; - xor::extend_xor_witness::(&mut cols, input1, input2, 128); + xor::extend_xor_witness(&mut cols, input1, input2, 128); cols }; @@ -404,19 +414,19 @@ fn test_xor_finalization() { let cs = ConstraintSystem::create(gates.clone()) .lookup(vec![xor::lookup_table()]) .public(num_inputs) - .build::() + .build::() .unwrap(); let mut srs = SRS::::create(cs.domain.d1.size()); srs.add_lagrange_basis(cs.domain.d1); let srs = Arc::new(srs); let (endo_q, _endo_r) = endos::(); - ProverIndex::::create(cs, endo_q, srs) + ProverIndex::>::create(cs, endo_q, srs) }; for row in 0..witness[0].len() { assert_eq!( - index.cs.gates[row].verify_witness::( + index.cs.gates[row].verify_witness::( row, &witness, &index.cs, @@ -426,7 +436,7 @@ fn test_xor_finalization() { ); } - TestFramework::::default() + TestFramework::::default() .gates(gates) .witness(witness.clone()) .public_inputs(vec![witness[0][0], witness[0][1]]) diff --git a/kimchi/src/verifier.rs b/kimchi/src/verifier.rs index 203427193a..c46fcaa8a1 100644 --- a/kimchi/src/verifier.rs +++ b/kimchi/src/verifier.rs @@ -3,47 +3,57 @@ use crate::{ circuits::{ argument::ArgumentType, + berkeley_columns::Column, constraints::ConstraintSystem, - expr::{Column, Constants, PolishToken}, + expr::{Constants, PolishToken}, gate::GateType, - lookup::tables::combine_table, + lookup::{lookups::LookupPattern, tables::combine_table}, polynomials::permutation, scalars::RandomOracles, - 
wires::PERMUTS, + wires::{KIMCHI_COLS, PERMUTS}, }, curve::KimchiCurve, error::VerifyError, oracles::OraclesResult, plonk_sponge::FrSponge, - proof::{ - LookupEvaluations, PointEvaluations, ProofEvaluations, ProverProof, RecursionChallenge, - }, + proof::{PointEvaluations, ProofEvaluations, ProverProof, RecursionChallenge}, verifier_index::VerifierIndex, }; use ark_ec::AffineCurve; use ark_ff::{Field, One, PrimeField, Zero}; -use ark_poly::{EvaluationDomain, Polynomial}; +use ark_poly::{univariate::DensePolynomial, EvaluationDomain, Polynomial}; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; -use poly_commitment::commitment::{ - absorb_commitment, combined_inner_product, BatchEvaluationProof, Evaluation, PolyComm, +use o1_utils::ExtendedDensePolynomial; +use poly_commitment::{ + commitment::{ + absorb_commitment, combined_inner_product, BatchEvaluationProof, Evaluation, PolyComm, + }, + OpenProof, SRS as _, }; use rand::thread_rng; /// The result of a proof verification. pub type Result = std::result::Result; -pub struct Context<'a, const W: usize, G: KimchiCurve> { +pub struct Context< + 'a, + G: KimchiCurve, + OpeningProof: OpenProof, + const COLUMNS: usize = KIMCHI_COLS, +> { /// The [VerifierIndex] associated to the proof - pub verifier_index: &'a VerifierIndex, + pub verifier_index: &'a VerifierIndex, /// The proof to verify - pub proof: &'a ProverProof, + pub proof: &'a ProverProof, /// The public input used in the creation of the proof pub public_input: &'a [G::ScalarField], } -impl<'a, const W: usize, G: KimchiCurve> Context<'a, W, G> { +impl<'a, G: KimchiCurve, OpeningProof: OpenProof, const COLUMNS: usize> + Context<'a, G, OpeningProof, COLUMNS> +{ pub fn get_column(&self, col: Column) -> Option<&'a PolyComm> { use Column::*; match col { @@ -64,7 +74,7 @@ impl<'a, const W: usize, G: KimchiCurve> Context<'a, W, G> { .runtime_tables_selector .as_ref()?, ), - LookupRuntimeTable => None, + LookupRuntimeTable => 
self.proof.commitments.lookup.as_ref()?.runtime.as_ref(), Index(t) => { use GateType::*; match t { @@ -91,7 +101,8 @@ impl<'a, const W: usize, G: KimchiCurve> Context<'a, W, G> { } } -impl ProverProof +impl, const COLUMNS: usize> + ProverProof where G::BaseField: PrimeField, { @@ -106,12 +117,12 @@ where /// Will panic if `PolishToken` evaluation is invalid. pub fn oracles< EFqSponge: Clone + FqSponge, - EFrSponge: FrSponge, + EFrSponge: FrSponge, >( &self, - index: &VerifierIndex, + index: &VerifierIndex, public_comm: &PolyComm, - public_input: &[G::ScalarField], + public_input: Option<&[G::ScalarField]>, ) -> Result> { //~ //~ #### Fiat-Shamir argument @@ -121,8 +132,19 @@ where let n = index.domain.size; let (_, endo_r) = G::endos(); + let chunk_size = { + let d1_size = index.domain.size(); + if d1_size < index.max_poly_size { + 1 + } else { + d1_size / index.max_poly_size + } + }; + + let zk_rows = index.zk_rows; + //~ 1. Setup the Fq-Sponge. - let mut fq_sponge = EFqSponge::new(G::OtherCurve::sponge_params()); + let mut fq_sponge = EFqSponge::new(G::other_curve_sponge_params()); //~ 1. Absorb the digest of the VerifierIndex. let verifier_index_digest = index.digest::(); @@ -214,9 +236,13 @@ where //~ 1. Derive $\alpha$ from $\alpha'$ using the endomorphism (TODO: details). let alpha = alpha_chal.to_field(endo_r); - //~ 1. Enforce that the length of the $t$ commitment is of size `PERMUTS`. - if self.commitments.t_comm.unshifted.len() != PERMUTS { - return Err(VerifyError::IncorrectCommitmentLength("t")); + //~ 1. Enforce that the length of the $t$ commitment is of size 7. + if self.commitments.t_comm.unshifted.len() > chunk_size * 7 { + return Err(VerifyError::IncorrectCommitmentLength( + "t", + chunk_size * 7, + self.commitments.t_comm.unshifted.len(), + )); } //~ 1. Absorb the commitment to the quotient polynomial $t$ into the argument. 
@@ -278,45 +304,53 @@ where let mut all_alphas = index.powers_of_alpha.clone(); all_alphas.instantiate(alpha); - // compute Lagrange base evaluation denominators - let w: Vec<_> = index.domain.elements().take(public_input.len()).collect(); + let public_evals = if let Some(public_evals) = &self.evals.public { + [public_evals.zeta.clone(), public_evals.zeta_omega.clone()] + } else if chunk_size > 1 { + return Err(VerifyError::MissingPublicInputEvaluation); + } else if let Some(public_input) = public_input { + // compute Lagrange base evaluation denominators + let w: Vec<_> = index.domain.elements().take(public_input.len()).collect(); - let mut zeta_minus_x: Vec<_> = w.iter().map(|w| zeta - w).collect(); + let mut zeta_minus_x: Vec<_> = w.iter().map(|w| zeta - w).collect(); - w.iter() - .take(public_input.len()) - .for_each(|w| zeta_minus_x.push(zetaw - w)); + w.iter() + .take(public_input.len()) + .for_each(|w| zeta_minus_x.push(zetaw - w)); - ark_ff::fields::batch_inversion::(&mut zeta_minus_x); + ark_ff::fields::batch_inversion::(&mut zeta_minus_x); - //~ 1. Evaluate the negated public polynomial (if present) at $\zeta$ and $\zeta\omega$. - //~ - //~ NOTE: this works only in the case when the poly segment size is not smaller than that of the domain. - let public_evals = if public_input.is_empty() { - [vec![G::ScalarField::zero()], vec![G::ScalarField::zero()]] + //~ 1. Evaluate the negated public polynomial (if present) at $\zeta$ and $\zeta\omega$. + //~ + //~ NOTE: this works only in the case when the poly segment size is not smaller than that of the domain. 
+ if public_input.is_empty() { + [vec![G::ScalarField::zero()], vec![G::ScalarField::zero()]] + } else { + [ + vec![ + (public_input + .iter() + .zip(zeta_minus_x.iter()) + .zip(index.domain.elements()) + .map(|((p, l), w)| -*l * p * w) + .fold(G::ScalarField::zero(), |x, y| x + y)) + * (zeta1 - G::ScalarField::one()) + * index.domain.size_inv, + ], + vec![ + (public_input + .iter() + .zip(zeta_minus_x[public_input.len()..].iter()) + .zip(index.domain.elements()) + .map(|((p, l), w)| -*l * p * w) + .fold(G::ScalarField::zero(), |x, y| x + y)) + * index.domain.size_inv + * (zetaw.pow([n]) - G::ScalarField::one()), + ], + ] + } } else { - [ - vec![ - (public_input - .iter() - .zip(zeta_minus_x.iter()) - .zip(index.domain.elements()) - .map(|((p, l), w)| -*l * p * w) - .fold(G::ScalarField::zero(), |x, y| x + y)) - * (zeta1 - G::ScalarField::one()) - * index.domain.size_inv, - ], - vec![ - (public_input - .iter() - .zip(zeta_minus_x[public_input.len()..].iter()) - .zip(index.domain.elements()) - .map(|((p, l), w)| -*l * p * w) - .fold(G::ScalarField::zero(), |x, y| x + y)) - * index.domain.size_inv - * (zetaw.pow([n]) - G::ScalarField::one()), - ], - ] + return Err(VerifyError::MissingPublicInputEvaluation); }; //~ 1. Absorb the unique evaluation of ft: $ft(\zeta\omega)$. @@ -351,7 +385,8 @@ where //~ 1. Compute the evaluation of $ft(\zeta)$. 
let ft_eval0 = { - let zkp = index.zkpm().evaluate(&zeta); + let permutation_vanishing_polynomial = + index.permutation_vanishing_polynomial_m().evaluate(&zeta); let zeta1m1 = zeta1 - G::ScalarField::one(); let mut alpha_powers = @@ -366,7 +401,10 @@ where .next() .expect("missing power of alpha for permutation"); - let init = (evals.w[PERMUTS - 1].zeta + gamma) * evals.z.zeta_omega * alpha0 * zkp; + let init = (evals.w[PERMUTS - 1].zeta + gamma) + * evals.z.zeta_omega + * alpha0 + * permutation_vanishing_polynomial; let mut ft_eval0 = evals .w .iter() @@ -374,18 +412,20 @@ where .map(|(w, s)| (beta * s.zeta) + w.zeta + gamma) .fold(init, |x, y| x * y); - ft_eval0 -= if public_evals[0].is_empty() { - G::ScalarField::zero() - } else { - public_evals[0][0] - }; + ft_eval0 -= DensePolynomial::eval_polynomial( + &public_evals[0], + powers_of_eval_points_for_chunks.zeta, + ); ft_eval0 -= evals .w .iter() .zip(index.shift.iter()) .map(|(w, s)| gamma + (beta * zeta * s) + w.zeta) - .fold(alpha0 * zkp * evals.z.zeta, |x, y| x * y); + .fold( + alpha0 * permutation_vanishing_polynomial * evals.z.zeta, + |x, y| x * y, + ); let numerator = ((zeta1m1 * alpha1 * (zeta - index.w())) + (zeta1m1 * alpha2 * (zeta - G::ScalarField::one()))) @@ -403,6 +443,7 @@ where joint_combiner: joint_combiner.as_ref().map(|j| j.1), endo_coefficient: index.endo, mds: &G::sponge_params().mds, + zk_rows, }; ft_eval0 -= PolishToken::evaluate( @@ -417,57 +458,146 @@ where ft_eval0 }; - let combined_inner_product = { - let ft_eval0 = vec![ft_eval0]; - let ft_eval1 = vec![self.ft_eval1]; - - #[allow(clippy::type_complexity)] - let mut es: Vec<(Vec>, Option)> = - polys.iter().map(|(_, e)| (e.clone(), None)).collect(); - es.push((public_evals.to_vec(), None)); - es.push((vec![ft_eval0, ft_eval1], None)); - for col in [ - Column::Z, - Column::Index(GateType::Generic), - Column::Index(GateType::Poseidon), - ] - .into_iter() - .chain((0..W).map(Column::Witness)) - .chain((0..W).map(Column::Coefficient)) - 
.chain((0..PERMUTS - 1).map(Column::Permutation)) - .chain( - index - .lookup_index - .as_ref() - .map(|li| { - (0..li.lookup_info.max_per_row + 1) - .map(Column::LookupSorted) - .chain([Column::LookupAggreg, Column::LookupTable].into_iter()) - .chain( - li.runtime_tables_selector - .as_ref() - .map(|_| [Column::LookupRuntimeTable].into_iter()) - .into_iter() - .flatten(), + let combined_inner_product = + { + let ft_eval0 = vec![ft_eval0]; + let ft_eval1 = vec![self.ft_eval1]; + + #[allow(clippy::type_complexity)] + let mut es: Vec<(Vec>, Option)> = + polys.iter().map(|(_, e)| (e.clone(), None)).collect(); + es.push((public_evals.to_vec(), None)); + es.push((vec![ft_eval0, ft_eval1], None)); + for col in [ + Column::Z, + Column::Index(GateType::Generic), + Column::Index(GateType::Poseidon), + Column::Index(GateType::CompleteAdd), + Column::Index(GateType::VarBaseMul), + Column::Index(GateType::EndoMul), + Column::Index(GateType::EndoMulScalar), + ] + .into_iter() + .chain((0..COLUMNS).map(Column::Witness)) + .chain((0..COLUMNS).map(Column::Coefficient)) + .chain((0..PERMUTS - 1).map(Column::Permutation)) + .chain( + index + .range_check0_comm + .as_ref() + .map(|_| Column::Index(GateType::RangeCheck0)), + ) + .chain( + index + .range_check1_comm + .as_ref() + .map(|_| Column::Index(GateType::RangeCheck1)), + ) + .chain( + index + .foreign_field_add_comm + .as_ref() + .map(|_| Column::Index(GateType::ForeignFieldAdd)), + ) + .chain( + index + .foreign_field_mul_comm + .as_ref() + .map(|_| Column::Index(GateType::ForeignFieldMul)), + ) + .chain( + index + .xor_comm + .as_ref() + .map(|_| Column::Index(GateType::Xor16)), + ) + .chain( + index + .rot_comm + .as_ref() + .map(|_| Column::Index(GateType::Rot64)), + ) + .chain( + index + .keccak_round_comm + .as_ref() + .map(|_| Column::Index(GateType::KeccakRound)), + ) + .chain( + index + .keccak_sponge_comm + .as_ref() + .map(|_| Column::Index(GateType::KeccakSponge)), + ) + .chain( + index + .lookup_index + .as_ref() + 
.map(|li| { + (0..li.lookup_info.max_per_row + 1) + .map(Column::LookupSorted) + .chain([Column::LookupAggreg, Column::LookupTable].into_iter()) + .chain( + li.runtime_tables_selector + .as_ref() + .map(|_| [Column::LookupRuntimeTable].into_iter()) + .into_iter() + .flatten(), + ) + .chain( + self.evals + .runtime_lookup_table_selector + .as_ref() + .map(|_| Column::LookupRuntimeSelector), + ) + .chain( + self.evals + .xor_lookup_selector + .as_ref() + .map(|_| Column::LookupKindIndex(LookupPattern::Xor)), + ) + .chain( + self.evals + .lookup_gate_lookup_selector + .as_ref() + .map(|_| Column::LookupKindIndex(LookupPattern::Lookup)), + ) + .chain( + self.evals.range_check_lookup_selector.as_ref().map(|_| { + Column::LookupKindIndex(LookupPattern::RangeCheck) + }), + ) + .chain(self.evals.foreign_field_mul_lookup_selector.as_ref().map( + |_| Column::LookupKindIndex(LookupPattern::ForeignFieldMul), + )) + /*.chain( + self.evals.keccak_round_lookup_selector.as_ref().map(|_| { + Column::LookupKindIndex(LookupPattern::KeccakRound) + }), ) - }) - .into_iter() - .flatten(), - ) { - es.push(( - { - let evals = self - .evals - .get_column(col) - .ok_or(VerifyError::MissingEvaluation(col))?; - vec![evals.zeta.clone(), evals.zeta_omega.clone()] - }, - None, - )) - } + .chain( + self.evals.keccak_sponge_lookup_selector.as_ref().map(|_| { + Column::LookupKindIndex(LookupPattern::KeccakSponge) + }), + )*/ + }) + .into_iter() + .flatten(), + ) { + es.push(( + { + let evals = self + .evals + .get_column(col) + .ok_or(VerifyError::MissingEvaluation(col))?; + vec![evals.zeta.clone(), evals.zeta_omega.clone()] + }, + None, + )) + } - combined_inner_product(&evaluation_points, &v, &u, &es, index.srs().g.len()) - }; + combined_inner_product(&evaluation_points, &v, &u, &es, index.srs().max_poly_size()) + }; let oracles = RandomOracles { joint_combiner, @@ -501,71 +631,176 @@ where /// Enforce the length of evaluations inside [`Proof`]. 
/// Atm, the length of evaluations(both `zeta` and `zeta_omega`) SHOULD be 1. /// The length value is prone to future change. -fn check_proof_evals_len(proof: &ProverProof) -> Result<()> +fn check_proof_evals_len( + proof: &ProverProof, + expected_size: usize, +) -> Result<()> where G: KimchiCurve, G::BaseField: PrimeField, { let ProofEvaluations { + public, w, z, s, coefficients, - lookup, generic_selector, poseidon_selector, + complete_add_selector, + mul_selector, + emul_selector, + endomul_scalar_selector, + range_check0_selector, + range_check1_selector, + foreign_field_add_selector, + foreign_field_mul_selector, + xor_selector, + rot_selector, + keccak_round_selector, + keccak_sponge_selector, + lookup_aggregation, + lookup_table, + lookup_sorted, + runtime_lookup_table, + runtime_lookup_table_selector, + xor_lookup_selector, + lookup_gate_lookup_selector, + range_check_lookup_selector, + foreign_field_mul_lookup_selector, + //keccak_round_lookup_selector, + //keccak_sponge_lookup_selector, } = &proof.evals; - let check_eval_len = |eval: &PointEvaluations>| -> Result<()> { - if eval.zeta.len().is_one() && eval.zeta_omega.len().is_one() { - Ok(()) + let check_eval_len = |eval: &PointEvaluations>, str: &'static str| -> Result<()> { + if eval.zeta.len() != expected_size { + Err(VerifyError::IncorrectEvaluationsLength( + expected_size, + eval.zeta.len(), + str, + )) + } else if eval.zeta_omega.len() != expected_size { + Err(VerifyError::IncorrectEvaluationsLength( + expected_size, + eval.zeta_omega.len(), + str, + )) } else { - Err(VerifyError::IncorrectEvaluationsLength) + Ok(()) } }; + if let Some(public) = public { + check_eval_len(public, "public input")?; + } + for w_i in w { - check_eval_len(w_i)?; + check_eval_len(w_i, "witness")?; } - check_eval_len(z)?; + check_eval_len(z, "permutation accumulator")?; for s_i in s { - check_eval_len(s_i)?; + check_eval_len(s_i, "permutation shifts")?; } for coeff in coefficients { - check_eval_len(coeff)?; - } - if let 
Some(LookupEvaluations { - sorted, - aggreg, - table, - runtime, - }) = lookup - { - for sorted_i in sorted { - check_eval_len(sorted_i)?; - } - check_eval_len(aggreg)?; - check_eval_len(table)?; - if let Some(runtime) = &runtime { - check_eval_len(runtime)?; - } + check_eval_len(coeff, "coefficients")?; + } + + // Lookup evaluations + for sorted in lookup_sorted.iter().flatten() { + check_eval_len(sorted, "lookup sorted")? + } + + if let Some(lookup_aggregation) = lookup_aggregation { + check_eval_len(lookup_aggregation, "lookup aggregation")?; + } + if let Some(lookup_table) = lookup_table { + check_eval_len(lookup_table, "lookup table")?; + } + if let Some(runtime_lookup_table) = runtime_lookup_table { + check_eval_len(runtime_lookup_table, "runtime lookup table")?; + } + + check_eval_len(generic_selector, "generic selector")?; + check_eval_len(poseidon_selector, "poseidon selector")?; + check_eval_len(complete_add_selector, "complete add selector")?; + check_eval_len(mul_selector, "mul selector")?; + check_eval_len(emul_selector, "endomul selector")?; + check_eval_len(endomul_scalar_selector, "endomul scalar selector")?; + + // Optional gates + + if let Some(range_check0_selector) = range_check0_selector { + check_eval_len(range_check0_selector, "range check 0 selector")? + } + if let Some(range_check1_selector) = range_check1_selector { + check_eval_len(range_check1_selector, "range check 1 selector")? + } + if let Some(foreign_field_add_selector) = foreign_field_add_selector { + check_eval_len(foreign_field_add_selector, "foreign field add selector")? + } + if let Some(foreign_field_mul_selector) = foreign_field_mul_selector { + check_eval_len(foreign_field_mul_selector, "foreign field mul selector")? + } + if let Some(xor_selector) = xor_selector { + check_eval_len(xor_selector, "xor selector")? + } + if let Some(rot_selector) = rot_selector { + check_eval_len(rot_selector, "rot selector")? 
+ } + if let Some(keccak_round_selector) = keccak_round_selector { + check_eval_len(keccak_round_selector, "keccak round selector")? + } + if let Some(keccak_sponge_selector) = keccak_sponge_selector { + check_eval_len(keccak_sponge_selector, "keccak sponge selector")? + } + + // Lookup selectors + + if let Some(runtime_lookup_table_selector) = runtime_lookup_table_selector { + check_eval_len( + runtime_lookup_table_selector, + "runtime lookup table selector", + )? + } + if let Some(xor_lookup_selector) = xor_lookup_selector { + check_eval_len(xor_lookup_selector, "xor lookup selector")? + } + if let Some(lookup_gate_lookup_selector) = lookup_gate_lookup_selector { + check_eval_len(lookup_gate_lookup_selector, "lookup gate lookup selector")? + } + if let Some(range_check_lookup_selector) = range_check_lookup_selector { + check_eval_len(range_check_lookup_selector, "range check lookup selector")? } - check_eval_len(generic_selector)?; - check_eval_len(poseidon_selector)?; + if let Some(foreign_field_mul_lookup_selector) = foreign_field_mul_lookup_selector { + check_eval_len( + foreign_field_mul_lookup_selector, + "foreign field mul lookup selector", + )? + } + /* + if let Some(keccak_round_lookup_selector) = keccak_round_lookup_selector { + check_eval_len(keccak_round_lookup_selector, "keccak round lookup selector")? + } + if let Some(keccak_sponge_lookup_selector) = keccak_sponge_lookup_selector { + check_eval_len( + keccak_sponge_lookup_selector, + "keccak sponge lookup selector", + )? 
+ }*/ Ok(()) } -fn to_batch<'a, const W: usize, G, EFqSponge, EFrSponge>( - verifier_index: &VerifierIndex, - proof: &'a ProverProof, +fn to_batch<'a, G, EFqSponge, EFrSponge, OpeningProof: OpenProof, const COLUMNS: usize>( + verifier_index: &VerifierIndex, + proof: &'a ProverProof, public_input: &'a [::ScalarField], -) -> Result> +) -> Result> where G: KimchiCurve, G::BaseField: PrimeField, EFqSponge: Clone + FqSponge, - EFrSponge: FrSponge, + EFrSponge: FrSponge, { //~ //~ #### Partial verification @@ -575,6 +810,8 @@ where //~ Essentially, this steps verifies that $f(\zeta) = t(\zeta) * Z_H(\zeta)$. //~ + let zk_rows = verifier_index.zk_rows; + if proof.prev_challenges.len() != verifier_index.prev_challenges { return Err(VerifyError::IncorrectPrevChallengesLength( verifier_index.prev_challenges, @@ -588,7 +825,15 @@ where } //~ 1. Check the length of evaluations inside the proof. - check_proof_evals_len(proof)?; + let chunk_size = { + let d1_size = verifier_index.domain.size(); + if d1_size < verifier_index.max_poly_size { + 1 + } else { + d1_size / verifier_index.max_poly_size + } + }; + check_proof_evals_len(proof, chunk_size)?; //~ 1. Commit to the negated public input polynomial. 
let public_comm = { @@ -599,23 +844,26 @@ where } let lgr_comm = verifier_index .srs() - .lagrange_bases - .get(&verifier_index.domain.size()) + .get_lagrange_basis(verifier_index.domain.size()) .expect("pre-computed committed lagrange bases not found"); let com: Vec<_> = lgr_comm.iter().take(verifier_index.public).collect(); - let elm: Vec<_> = public_input.iter().map(|s| -*s).collect(); - let public_comm = PolyComm::::multi_scalar_mul(&com, &elm); - verifier_index - .srs() - .mask_custom( - public_comm, - &PolyComm { - unshifted: vec![G::ScalarField::one(); 1], - shifted: None, - }, + if public_input.is_empty() { + PolyComm::new( + vec![verifier_index.srs().blinding_commitment(); chunk_size], + None, ) - .unwrap() - .commitment + } else { + let elm: Vec<_> = public_input.iter().map(|s| -*s).collect(); + let public_comm = PolyComm::::multi_scalar_mul(&com, &elm); + verifier_index + .srs() + .mask_custom( + public_comm.clone(), + &public_comm.map(|_| G::ScalarField::one()), + ) + .unwrap() + .commitment + } }; //~ 1. Run the [Fiat-Shamir argument](#fiat-shamir-argument). @@ -630,7 +878,7 @@ where ft_eval0, combined_inner_product, .. - } = proof.oracles::(verifier_index, &public_comm, public_input)?; + } = proof.oracles::(verifier_index, &public_comm, Some(public_input))?; //~ 1. Combine the chunked polynomials' evaluations //~ (TODO: most likely only the quotient polynomial is chunked) @@ -653,7 +901,9 @@ where //~ in which case the evaluation should be used in place of the commitment. 
let f_comm = { // the permutation is written manually (not using the expr framework) - let zkp = verifier_index.zkpm().evaluate(&oracles.zeta); + let permutation_vanishing_polynomial = verifier_index + .permutation_vanishing_polynomial_m() + .evaluate(&oracles.zeta); let alphas = all_alphas.get_alphas(ArgumentType::Permutation, permutation::CONSTRAINTS); @@ -663,7 +913,7 @@ where oracles.beta, oracles.gamma, alphas, - zkp, + permutation_vanishing_polynomial, )]; // other gates are implemented using the expression framework @@ -676,6 +926,7 @@ where joint_combiner: oracles.joint_combiner.as_ref().map(|j| j.1), endo_coefficient: verifier_index.endo, mds: &G::sponge_params().mds, + zk_rows, }; for (col, tokens) in &verifier_index.linearization.index_terms { @@ -742,14 +993,55 @@ where //~~ * index commitments that use the coefficients Column::Index(GateType::Generic), Column::Index(GateType::Poseidon), + Column::Index(GateType::CompleteAdd), + Column::Index(GateType::VarBaseMul), + Column::Index(GateType::EndoMul), + Column::Index(GateType::EndoMulScalar), ] .into_iter() //~~ * witness commitments - .chain((0..W).map(Column::Witness)) + .chain((0..COLUMNS).map(Column::Witness)) //~~ * coefficient commitments - .chain((0..W).map(Column::Coefficient)) + .chain((0..COLUMNS).map(Column::Coefficient)) //~~ * sigma commitments .chain((0..PERMUTS - 1).map(Column::Permutation)) + //~~ * optional gate commitments + .chain( + verifier_index + .range_check0_comm + .as_ref() + .map(|_| Column::Index(GateType::RangeCheck0)), + ) + .chain( + verifier_index + .range_check1_comm + .as_ref() + .map(|_| Column::Index(GateType::RangeCheck1)), + ) + .chain( + verifier_index + .foreign_field_add_comm + .as_ref() + .map(|_| Column::Index(GateType::ForeignFieldAdd)), + ) + .chain( + verifier_index + .foreign_field_mul_comm + .as_ref() + .map(|_| Column::Index(GateType::ForeignFieldMul)), + ) + .chain( + verifier_index + .xor_comm + .as_ref() + .map(|_| Column::Index(GateType::Xor16)), + ) + 
.chain( + verifier_index + .rot_comm + .as_ref() + .map(|_| Column::Index(GateType::Rot64)), + ) //~~ * lookup commitments //~ .chain( @@ -786,11 +1078,13 @@ where .lookup .as_ref() .ok_or(VerifyError::LookupCommitmentMissing)?; - let lookup_eval = proof + + let lookup_table = proof .evals - .lookup + .lookup_table .as_ref() .ok_or(VerifyError::LookupEvalsMissing)?; + let runtime_lookup_table = proof.evals.runtime_lookup_table.as_ref(); // compute table commitment let table_comm = { @@ -815,10 +1109,7 @@ where // add evaluation of the table polynomial evaluations.push(Evaluation { commitment: table_comm, - evaluations: vec![ - lookup_eval.table.zeta.clone(), - lookup_eval.table.zeta_omega.clone(), - ], + evaluations: vec![lookup_table.zeta.clone(), lookup_table.zeta_omega.clone()], degree_bound: None, }); @@ -828,8 +1119,7 @@ where .runtime .as_ref() .ok_or(VerifyError::IncorrectRuntimeProof)?; - let runtime_eval = lookup_eval - .runtime + let runtime_eval = runtime_lookup_table .as_ref() .map(|x| x.map_ref(&|x| x.clone())) .ok_or(VerifyError::IncorrectRuntimeProof)?; @@ -842,6 +1132,68 @@ where } } + for col in verifier_index + .lookup_index + .as_ref() + .map(|li| { + (li.runtime_tables_selector + .as_ref() + .map(|_| Column::LookupRuntimeSelector)) + .into_iter() + .chain( + li.lookup_selectors + .xor + .as_ref() + .map(|_| Column::LookupKindIndex(LookupPattern::Xor)), + ) + .chain( + li.lookup_selectors + .lookup + .as_ref() + .map(|_| Column::LookupKindIndex(LookupPattern::Lookup)), + ) + .chain( + li.lookup_selectors + .range_check + .as_ref() + .map(|_| Column::LookupKindIndex(LookupPattern::RangeCheck)), + ) + .chain( + li.lookup_selectors + .ffmul + .as_ref() + .map(|_| Column::LookupKindIndex(LookupPattern::ForeignFieldMul)), + ) + /* .chain( + li.lookup_selectors + .keccak_round + .as_ref() + .map(|_| Column::LookupKindIndex(LookupPattern::KeccakRound)), + ) + .chain( + li.lookup_selectors + .keccak_sponge + .as_ref() + .map(|_| 
Column::LookupKindIndex(LookupPattern::KeccakSponge)), + )*/ + }) + .into_iter() + .flatten() + { + let evals = proof + .evals + .get_column(col) + .ok_or(VerifyError::MissingEvaluation(col))?; + evaluations.push(Evaluation { + commitment: context + .get_column(col) + .ok_or(VerifyError::MissingCommitment(col))? + .clone(), + evaluations: vec![evals.zeta.clone(), evals.zeta_omega.clone()], + degree_bound: None, + }); + } + // prepare for the opening proof verification let evaluation_points = vec![oracles.zeta, oracles.zeta * verifier_index.domain.group_gen]; Ok(BatchEvaluationProof { @@ -860,24 +1212,24 @@ where /// # Errors /// /// Will give error if `proof(s)` are not verified as valid. -pub fn verify( +pub fn verify, const COLUMNS: usize>( group_map: &G::Map, - verifier_index: &VerifierIndex, - proof: &ProverProof, + verifier_index: &VerifierIndex, + proof: &ProverProof, public_input: &[G::ScalarField], ) -> Result<()> where G: KimchiCurve, G::BaseField: PrimeField, EFqSponge: Clone + FqSponge, - EFrSponge: FrSponge, + EFrSponge: FrSponge, { let proofs = vec![Context { verifier_index, proof, public_input, }]; - batch_verify::(group_map, &proofs) + batch_verify::(group_map, &proofs) } /// This function verifies the batch of zk-proofs @@ -887,15 +1239,15 @@ where /// # Errors /// /// Will give error if `srs` of `proof` is invalid or `verify` process fails. -pub fn batch_verify( +pub fn batch_verify, const COLUMNS: usize>( group_map: &G::Map, - proofs: &[Context], + proofs: &[Context], ) -> Result<()> where G: KimchiCurve, G::BaseField: PrimeField, EFqSponge: Clone + FqSponge, - EFrSponge: FrSponge, + EFrSponge: FrSponge, { //~ #### Batch verification of proofs //~ @@ -913,14 +1265,9 @@ where // TODO: Account for the different SRS lengths let srs = proofs[0].verifier_index.srs(); for &Context { verifier_index, .. 
} in proofs { - if verifier_index.srs().g.len() != srs.g.len() { + if verifier_index.srs().max_poly_size() != srs.max_poly_size() { return Err(VerifyError::DifferentSRS); } - - // also make sure that the SRS is not smaller than the domain size - if verifier_index.srs().max_degree() < verifier_index.domain.size() { - return Err(VerifyError::SRSTooSmall); - } } //~ 1. Validate each proof separately following the [partial verification](#partial-verification) steps. @@ -931,7 +1278,7 @@ where public_input, } in proofs { - batch.push(to_batch::( + batch.push(to_batch::( verifier_index, proof, public_input, @@ -939,7 +1286,7 @@ where } //~ 1. Use the [`PolyCom.verify`](#polynomial-commitments) to verify the partially evaluated proofs. - if srs.verify::(group_map, &mut batch, &mut thread_rng()) { + if OpeningProof::verify(srs, group_map, &mut batch, &mut thread_rng()) { Ok(()) } else { Err(VerifyError::OpenProof) diff --git a/kimchi/src/verifier_index.rs b/kimchi/src/verifier_index.rs index 4563a47a0d..501b70be42 100644 --- a/kimchi/src/verifier_index.rs +++ b/kimchi/src/verifier_index.rs @@ -4,13 +4,13 @@ use crate::{ alphas::Alphas, circuits::{ + berkeley_columns::Column, expr::{Linearization, PolishToken}, lookup::{index::LookupSelectors, lookups::LookupInfo}, - polynomials::permutation::{zk_polynomial, zk_w3}, - wires::PERMUTS, + polynomials::permutation::{vanishes_on_last_n_rows, zk_w}, + wires::{KIMCHI_COLS, PERMUTS}, }, curve::KimchiCurve, - error::VerifierIndexError, prover_index::ProverIndex, }; use ark_ff::{One, PrimeField}; @@ -19,7 +19,7 @@ use mina_poseidon::FqSponge; use once_cell::sync::OnceCell; use poly_commitment::{ commitment::{CommitmentCurve, PolyComm}, - srs::SRS, + OpenProof, SRS as _, }; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_with::serde_as; @@ -56,15 +56,22 @@ pub struct LookupVerifierIndex { #[serde_as] #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct VerifierIndex { +pub struct VerifierIndex< + G: 
KimchiCurve, + OpeningProof: OpenProof, + const COLUMNS: usize = KIMCHI_COLS, +> { /// evaluation domain #[serde_as(as = "o1_utils::serialization::SerdeAs")] pub domain: D, /// maximal size of polynomial section pub max_poly_size: usize, + /// the number of randomized rows to achieve zero knowledge + pub zk_rows: u64, /// polynomial commitment keys #[serde(skip)] - pub srs: OnceCell>>, + #[serde(bound(deserialize = "OpeningProof::SRS: Default"))] + pub srs: Arc, /// number of public inputs pub public: usize, /// number of previous evaluation challenges, for recursive proving @@ -75,8 +82,8 @@ pub struct VerifierIndex { #[serde(bound = "PolyComm: Serialize + DeserializeOwned")] pub sigma_comm: [PolyComm; PERMUTS], /// coefficient commitment array - #[serde(bound = "PolyComm: Serialize + DeserializeOwned")] - pub coefficients_comm: Vec>, + #[serde_as(as = "[_; COLUMNS]")] + pub coefficients_comm: [PolyComm; COLUMNS], /// coefficient commitment array #[serde(bound = "PolyComm: Serialize + DeserializeOwned")] pub generic_comm: PolyComm, @@ -137,7 +144,7 @@ pub struct VerifierIndex { pub shift: [G::ScalarField; PERMUTS], /// zero-knowledge polynomial #[serde(skip)] - pub zkpm: OnceCell>, + pub permutation_vanishing_polynomial_m: OnceCell>, // TODO(mimoo): isn't this redundant with domain.d1.group_gen ? /// domain offset for zero-knowledge #[serde(skip)] @@ -150,20 +157,27 @@ pub struct VerifierIndex { pub lookup_index: Option>, #[serde(skip)] - pub linearization: Linearization>>, + pub linearization: Linearization>, Column>, /// The mapping between powers of alpha and constraints #[serde(skip)] pub powers_of_alpha: Alphas, } //~spec:endcode -impl ProverIndex { +impl, const COLUMNS: usize> + ProverIndex +where + G::BaseField: PrimeField, +{ /// Produces the [`VerifierIndex`] from the prover's [`ProverIndex`]. /// /// # Panics /// /// Will panic if `srs` cannot be in `cell`. 
- pub fn verifier_index(&self) -> VerifierIndex { + pub fn verifier_index(&self) -> VerifierIndex + where + VerifierIndex: Clone, + { if let Some(verifier_index) = &self.verifier_index { return verifier_index.clone(); } @@ -208,14 +222,11 @@ impl ProverIndex { VerifierIndex { domain, max_poly_size: self.max_poly_size, + zk_rows: self.cs.zk_rows, powers_of_alpha: self.powers_of_alpha.clone(), public: self.cs.public, prev_challenges: self.cs.prev_challenges, - srs: { - let cell = OnceCell::new(); - cell.set(Arc::clone(&self.srs)).unwrap(); - cell - }, + srs: Arc::clone(&self.srs), sigma_comm: array::from_fn(|i| { self.srs.commit_evaluations_non_hiding( @@ -223,12 +234,12 @@ impl ProverIndex { &self.column_evaluations.permutation_coefficients8[i], ) }), - coefficients_comm: self - .column_evaluations - .coefficients8 - .iter() - .map(|c| self.srs.commit_evaluations_non_hiding(domain, c)) - .collect::>(), + coefficients_comm: array::from_fn(|i| { + self.srs.commit_evaluations_non_hiding( + domain, + &self.column_evaluations.coefficients8[i], + ) + }), generic_comm: mask_fixed( self.srs.commit_evaluations_non_hiding( domain, @@ -241,21 +252,23 @@ impl ProverIndex { &self.column_evaluations.poseidon_selector8, )), - complete_add_comm: self.srs.commit_evaluations_non_hiding( + complete_add_comm: mask_fixed(self.srs.commit_evaluations_non_hiding( domain, &self.column_evaluations.complete_add_selector4, + )), + mul_comm: mask_fixed( + self.srs + .commit_evaluations_non_hiding(domain, &self.column_evaluations.mul_selector8), + ), + emul_comm: mask_fixed( + self.srs + .commit_evaluations_non_hiding(domain, &self.column_evaluations.emul_selector8), ), - mul_comm: self - .srs - .commit_evaluations_non_hiding(domain, &self.column_evaluations.mul_selector8), - emul_comm: self - .srs - .commit_evaluations_non_hiding(domain, &self.column_evaluations.emul_selector8), - - endomul_scalar_comm: self.srs.commit_evaluations_non_hiding( + + endomul_scalar_comm: 
mask_fixed(self.srs.commit_evaluations_non_hiding( domain, &self.column_evaluations.endomul_scalar_selector8, - ), + )), range_check0_comm: self .column_evaluations @@ -304,14 +317,20 @@ impl ProverIndex { .map(|eval8| self.srs.commit_evaluations_non_hiding(domain, eval8)), shift: self.cs.shift, - zkpm: { + permutation_vanishing_polynomial_m: { let cell = OnceCell::new(); - cell.set(self.cs.precomputations().zkpm.clone()).unwrap(); + cell.set( + self.cs + .precomputations() + .permutation_vanishing_polynomial_m + .clone(), + ) + .unwrap(); cell }, w: { let cell = OnceCell::new(); - cell.set(zk_w3(self.cs.domain.d1)).unwrap(); + cell.set(zk_w(self.cs.domain.d1, self.cs.zk_rows)).unwrap(); cell }, endo: self.cs.endo, @@ -321,27 +340,26 @@ impl ProverIndex { } } -impl VerifierIndex { +impl, const COLUMNS: usize> + VerifierIndex +{ /// Gets srs from [`VerifierIndex`] lazily - pub fn srs(&self) -> &Arc> + pub fn srs(&self) -> &Arc where G::BaseField: PrimeField, { - self.srs.get_or_init(|| { - let mut srs = SRS::::create(self.max_poly_size); - srs.add_lagrange_basis(self.domain); - Arc::new(srs) - }) + &self.srs } - /// Gets zkpm from [`VerifierIndex`] lazily - pub fn zkpm(&self) -> &DensePolynomial { - self.zkpm.get_or_init(|| zk_polynomial(self.domain)) + /// Gets permutation_vanishing_polynomial_m from [`VerifierIndex`] lazily + pub fn permutation_vanishing_polynomial_m(&self) -> &DensePolynomial { + self.permutation_vanishing_polynomial_m + .get_or_init(|| vanishes_on_last_n_rows(self.domain, self.zk_rows)) } /// Gets w from [`VerifierIndex`] lazily pub fn w(&self) -> &G::ScalarField { - self.w.get_or_init(|| zk_w3(self.domain)) + self.w.get_or_init(|| zk_w(self.domain, self.zk_rows)) } /// Deserializes a [`VerifierIndex`] from a file, given a pointer to an SRS and an optional offset in the file. @@ -350,12 +368,15 @@ impl VerifierIndex { /// /// Will give error if it fails to deserialize from file or unable to set `srs` in `verifier_index`. 
pub fn from_file( - srs: Option>>, + srs: Arc, path: &Path, offset: Option, // TODO: we shouldn't have to pass these endo: G::ScalarField, - ) -> Result { + ) -> Result + where + OpeningProof::SRS: Default, + { // open file let file = File::open(path).map_err(|e| e.to_string())?; @@ -370,13 +391,7 @@ impl VerifierIndex { .map_err(|e| e.to_string())?; // fill in the rest - if let Some(srs) = srs { - verifier_index - .srs - .set(srs) - .map_err(|_| VerifierIndexError::SRSHasBeenSet.to_string())?; - }; - + verifier_index.srs = srs; verifier_index.endo = endo; Ok(verifier_index) @@ -409,11 +424,12 @@ impl VerifierIndex { pub fn digest>( &self, ) -> G::BaseField { - let mut fq_sponge = EFqSponge::new(G::OtherCurve::sponge_params()); + let mut fq_sponge = EFqSponge::new(G::other_curve_sponge_params()); // We fully expand this to make the compiler check that we aren't missing any commitments let VerifierIndex { domain: _, max_poly_size: _, + zk_rows: _, srs: _, public: _, prev_challenges: _, @@ -442,7 +458,7 @@ impl VerifierIndex { lookup_index, shift: _, - zkpm: _, + permutation_vanishing_polynomial_m: _, w: _, endo: _, diff --git a/optimism/.gitignore b/optimism/.gitignore new file mode 100644 index 0000000000..53df36bb78 --- /dev/null +++ b/optimism/.gitignore @@ -0,0 +1 @@ +rpcs.sh diff --git a/optimism/Cargo.toml b/optimism/Cargo.toml new file mode 100644 index 0000000000..da634f3030 --- /dev/null +++ b/optimism/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "kimchi_optimism" +version = "0.1.0" +description = "MIPS demo" +repository = "https://github.com/o1-labs/proof-systems" +homepage = "https://o1-labs.github.io/proof-systems/" +documentation = "https://o1-labs.github.io/proof-systems/rustdoc/" +readme = "README.md" +edition = "2021" +license = "Apache-2.0" + +[lib] +path = "src/lib.rs" + +[dependencies] +ark-bn254 = { version = "0.3.0" } +kimchi = { path = "../kimchi", version = "0.1.0", features = [ "bn254" ] } +poly-commitment = { path = "../poly-commitment", 
version = "0.1.0" } +groupmap = { path = "../groupmap", version = "0.1.0" } +mina-curves = { path = "../curves", version = "0.1.0" } +mina-poseidon = { path = "../poseidon", version = "0.1.0" } +elf = "0.7.2" +rmp-serde = "1.1.1" +serde_json = "1.0.91" +serde = "1.0.130" +serde_with = "1.10.0" +ark-poly = { version = "0.3.0", features = [ "parallel" ] } +ark-ff = { version = "0.3.0", features = [ "parallel" ] } +clap = "4.4.6" +hex = "0.4.3" +regex = "1.10.2" +libflate = "2" +base64 = "0.21.5" diff --git a/optimism/README.md b/optimism/README.md new file mode 100644 index 0000000000..2dbf25c375 --- /dev/null +++ b/optimism/README.md @@ -0,0 +1,18 @@ +To run the demo: +* create an executable file `rpcs.sh` that looks like + ```bash + #!/usr/bin/env bash + export L1RPC=http://xxxxxxxxx + export L2RPC=http://xxxxxxxxx + ``` +* run the `run-code.sh` script. + +This will +* generate the initial state, +* execute the OP program, +* execute the OP program through the cannon MIPS VM, +* execute the OP program through the kimchi MIPS VM prover. + +The initial state will be output to a file with format `YYYY-MM-DD-HH-MM-SS-op-program-data-log.sh`. + +If you want to re-run against an existing state, pass the environment variable `FILENAME=YYYY-MM-DD-HH-MM-SS-op-program-data-log.sh` to the `run-code.sh` script. 
diff --git a/optimism/ethereum-optimism b/optimism/ethereum-optimism new file mode 160000 index 0000000000..c83cd947d4 --- /dev/null +++ b/optimism/ethereum-optimism @@ -0,0 +1 @@ +Subproject commit c83cd947d419aa2c213583a32872bc350a69e566 diff --git a/optimism/generate-config.sh b/optimism/generate-config.sh new file mode 100755 index 0000000000..3e8dae0221 --- /dev/null +++ b/optimism/generate-config.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +set -euo pipefail + +source rpcs.sh + +# L2 output oracle on Goerli +# L2_OUTPUT_ORACLE=0xE6Dfba0953616Bacab0c9A8ecb3a9BBa77FC15c0 +# L2 output oracle on Sepolia +L2_OUTPUT_ORACLE=0x90E9c4f8a994a250F6aEfd61CAFb4F2e895D458F + +L2_FINALIZED_NUMBER=$(cast block finalized --rpc-url "${L2RPC}" -f number) +echo "Finalize number: ${L2_FINALIZED_NUMBER}" 1>&2 +L2_FINALIZED_HASH=$(cast block "${L2_FINALIZED_NUMBER}" --rpc-url "${L2RPC}" -f hash) + +L1_FINALIZED_NUMBER=$(cast block finalized --rpc-url "${L1RPC}" -f number) +L1_FINALIZED_HASH=$(cast block "${L1_FINALIZED_NUMBER}" --rpc-url "${L1RPC}" -f hash) + +OUTPUT_INDEX=$(cast call --rpc-url "${L1RPC}" "${L2_OUTPUT_ORACLE}" 'getL2OutputIndexAfter(uint256) returns(uint256)' "${L2_FINALIZED_NUMBER}") +OUTPUT_INDEX=$((OUTPUT_INDEX-1)) + +OUTPUT=$(cast call --rpc-url "${L1RPC}" "${L2_OUTPUT_ORACLE}" 'getL2Output(uint256) returns(bytes32,uint128,uint128)' "${OUTPUT_INDEX}") +OUTPUT_ROOT=$(echo ${OUTPUT} | cut -d' ' -f 1) +OUTPUT_TIMESTAMP=$(echo ${OUTPUT} | cut -d' ' -f 2) +OUTPUT_L2BLOCK_NUMBER=$(echo ${OUTPUT} | cut -d' ' -f 3) + +L1_HEAD=$L1_FINALIZED_HASH +L2_CLAIM=$OUTPUT_ROOT +L2_BLOCK_NUMBER=$OUTPUT_L2BLOCK_NUMBER + +STARTING_L2BLOCK_NUMBER=$((L2_BLOCK_NUMBER-100)) +STARTING_OUTPUT_INDEX=$(cast call --rpc-url "${L1RPC}" "${L2_OUTPUT_ORACLE}" 'getL2OutputIndexAfter(uint256) returns(uint256)' "${STARTING_L2BLOCK_NUMBER}") +STARTING_OUTPUT=$(cast call --rpc-url "${L1RPC}" "${L2_OUTPUT_ORACLE}" 'getL2Output(uint256) returns(bytes32,uint128,uint128)' "${STARTING_OUTPUT_INDEX}") 
+STARTING_OUTPUT_ROOT=$(echo ${OUTPUT} | cut -d' ' -f 1) +L2_HEAD_NUMBER=$(echo ${OUTPUT} | cut -d' ' -f 3) +L2_HEAD=$(cast block "${L2_HEAD_NUMBER}" --rpc-url "${L2RPC}" -f hash) + +TODAY=$(date +"%Y-%m-%d-%H-%M-%S") +FILENAME=${TODAY}-op-program-data-log.sh +OP_PROGRAM_DATA_DIR=$(pwd)/op-program-db-sepolia-${TODAY} + +echo "export L1_HEAD=${L1_HEAD}" >> ${FILENAME} +echo "export L2_HEAD=${L2_HEAD}" >> ${FILENAME} +echo "export L2_BLOCK_NUMBER=${L2_BLOCK_NUMBER}" >> ${FILENAME} +echo "export STARTING_OUTPUT_ROOT=${STARTING_OUTPUT_ROOT}" >> ${FILENAME} +echo "export L2_CLAIM=${L2_CLAIM}" >> ${FILENAME} +echo "export OP_PROGRAM_DATA_DIR=${OP_PROGRAM_DATA_DIR}" >> ${FILENAME} +echo "export L1RPC=${L1RPC}" >> ${FILENAME} +echo "export L2RPC=${L2RPC}" >> ${FILENAME} + +echo "${FILENAME}" diff --git a/optimism/run-code.sh b/optimism/run-code.sh new file mode 100755 index 0000000000..c664e58025 --- /dev/null +++ b/optimism/run-code.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +set -euo pipefail + +source rpcs.sh + +set +u +if [ -z "${FILENAME}" ]; then + FILENAME="$(./generate-config.sh)" +fi +set -u + +source $FILENAME + +./run-op-program.sh + +./run-vm.sh diff --git a/optimism/run-op-program.sh b/optimism/run-op-program.sh new file mode 100755 index 0000000000..c16072e8c9 --- /dev/null +++ b/optimism/run-op-program.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash +set -euo pipefail + +make -C ./ethereum-optimism/op-program op-program +make -C ./ethereum-optimism/cannon cannon + +set -x +./ethereum-optimism/op-program/bin/op-program \ + --log.level DEBUG \ + --l1 $L1RPC \ + --l2 $L2RPC \ + --network sepolia \ + --datadir ${OP_PROGRAM_DATA_DIR} \ + --l1.head $L1_HEAD \ + --l2.head $L2_HEAD \ + --l2.outputroot $STARTING_OUTPUT_ROOT \ + --l2.claim $L2_CLAIM \ + --l2.blocknumber $L2_BLOCK_NUMBER + +./ethereum-optimism/cannon/bin/cannon load-elf --path=./ethereum-optimism/op-program/bin/op-program-client.elf + +./ethereum-optimism/cannon/bin/cannon run \ + --pprof.cpu \ + --info-at 
'%10000000' \ + --proof-at never \ + --input ./state.json \ + -- \ + ./ethereum-optimism/op-program/bin/op-program \ + --log.level DEBUG \ + --l1 ${L1RPC} \ + --l2 ${L2RPC} \ + --network sepolia \ + --datadir ${OP_PROGRAM_DATA_DIR} \ + --l1.head ${L1_HEAD} \ + --l2.head ${L2_HEAD} \ + --l2.outputroot ${STARTING_OUTPUT_ROOT} \ + --l2.claim ${L2_CLAIM} \ + --l2.blocknumber ${L2_BLOCK_NUMBER} \ + --server diff --git a/optimism/run-vm.sh b/optimism/run-vm.sh new file mode 100755 index 0000000000..4f24506b4e --- /dev/null +++ b/optimism/run-vm.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +set -euo pipefail + +cargo run -p kimchi_optimism -- \ + --pprof-cpu \ + --info-at '%10000000' \ + --proof-at never \ + --input ./state.json \ + -- \ + ./ethereum-optimism/op-program/bin/op-program \ + --log.level DEBUG \ + --l1 ${L1RPC} \ + --l2 ${L2RPC} \ + --network sepolia \ + --datadir ${OP_PROGRAM_DATA_DIR} \ + --l1.head ${L1_HEAD} \ + --l2.head ${L2_HEAD} \ + --l2.outputroot ${STARTING_OUTPUT_ROOT} \ + --l2.claim ${L2_CLAIM} \ + --l2.blocknumber ${L2_BLOCK_NUMBER} \ + --server diff --git a/optimism/src/cannon.rs b/optimism/src/cannon.rs new file mode 100644 index 0000000000..6b0049c040 --- /dev/null +++ b/optimism/src/cannon.rs @@ -0,0 +1,137 @@ +// Data structure and stuff for compatibility with Cannon + +use base64::{engine::general_purpose, Engine as _}; +use libflate::zlib::Decoder; +use regex::Regex; +use serde::{Deserialize, Deserializer, Serialize}; +use std::io::Read; + +pub const PAGE_SIZE: usize = 4096; + +#[derive(Serialize, Deserialize, Debug)] +pub struct Page { + pub index: u32, + #[serde(deserialize_with = "from_base64")] + pub data: Vec, +} + +fn from_base64<'de, D>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let s: String = Deserialize::deserialize(deserializer)?; + let b64_decoded = general_purpose::STANDARD.decode(s).unwrap(); + let mut decoder = Decoder::new(&b64_decoded[..]).unwrap(); + let mut data = Vec::new(); + 
decoder.read_to_end(&mut data).unwrap(); + assert_eq!(data.len(), PAGE_SIZE); + Ok(data) +} + +// The renaming below keeps compatibility with OP Cannon's state format +#[derive(Serialize, Deserialize, Debug)] +pub struct State { + pub memory: Vec, + #[serde(rename = "preimageKey")] + pub preimage_key: String, + #[serde(rename = "preimageOffset")] + pub preimage_offset: u32, + pub pc: u32, + #[serde(rename = "nextPC")] + next_pc: u32, // + pub lo: u32, + pub hi: u32, + pub heap: u32, + exit: u8, + pub exited: bool, + pub step: u64, + pub registers: [u32; 32], + pub last_hint: Option>, +} + +#[derive(Clone, Debug, PartialEq)] +pub enum StepFrequency { + Never, + Always, + Exactly(u64), + Every(u64), +} + +// Simple parser for Cannon's "frequency format" +// A frequency input is either +// - never/always +// - = (only at step n) +// - % (every steps multiple of n) +pub fn step_frequency_parser(s: &str) -> std::result::Result { + use StepFrequency::*; + + let mod_re = Regex::new(r"%([0-9]+)").unwrap(); + let eq_re = Regex::new(r"=([0-9]+)").unwrap(); + + match s { + "never" => Ok(Never), + "always" => Ok(Always), + s => { + if let Some(m) = mod_re.captures(s) { + Ok(Every(m[1].parse::().unwrap())) + } else if let Some(m) = eq_re.captures(s) { + Ok(Exactly(m[1].parse::().unwrap())) + } else { + Err(format!("Unknown frequency format {}", s)) + } + } + } +} + +#[cfg(test)] +mod tests { + + use super::*; + + #[test] + fn sp_parser() { + use StepFrequency::*; + assert_eq!(step_frequency_parser("never"), Ok(Never)); + assert_eq!(step_frequency_parser("always"), Ok(Always)); + assert_eq!(step_frequency_parser("=123"), Ok(Exactly(123))); + assert_eq!(step_frequency_parser("%123"), Ok(Every(123))); + assert!(step_frequency_parser("@123").is_err()); + } +} + +impl ToString for State { + // A very debatable and incomplete, but serviceable, `to_string` implementation. 
+ fn to_string(&self) -> String { + format!( + "memory_size (length): {}\nfirst page size: {}\npreimage key: {}\npreimage offset:{}\npc: {}\nlo: {}\nhi: {}\nregisters:{:#?} ", + self.memory.len(), + self.memory[0].data.len(), + self.preimage_key, + self.preimage_offset, + self.pc, + self.lo, + self.hi, + self.registers + ) + } +} + +#[derive(Debug)] +pub struct HostProgram { + pub name: String, + pub arguments: Vec, +} + +#[derive(Debug)] +pub struct VmConfiguration { + pub input_state_file: String, + pub output_state_file: String, + pub metadata_file: String, + pub proof_at: StepFrequency, + pub stop_at: StepFrequency, + pub info_at: StepFrequency, + pub proof_fmt: String, + pub snapshot_fmt: String, + pub pprof_cpu: bool, + pub host: Option, +} diff --git a/optimism/src/lib.rs b/optimism/src/lib.rs new file mode 100644 index 0000000000..da4f2ad95d --- /dev/null +++ b/optimism/src/lib.rs @@ -0,0 +1,2 @@ +pub mod cannon; +pub mod mips; diff --git a/optimism/src/main.rs b/optimism/src/main.rs new file mode 100644 index 0000000000..732ceddb3d --- /dev/null +++ b/optimism/src/main.rs @@ -0,0 +1,143 @@ +use clap::{arg, value_parser, Arg, ArgAction, Command}; +use kimchi_optimism::{ + cannon::{State, VmConfiguration}, + mips::witness, +}; +use std::{fs::File, io::BufReader, process::ExitCode}; + +fn cli() -> VmConfiguration { + use kimchi_optimism::cannon::*; + + let app_name = "zkvm"; + let cli = Command::new(app_name) + .version("0.1") + .about("MIPS-based zkvm") + .arg(arg!(--input "initial state file").default_value("state.json")) + .arg(arg!(--output "output state file").default_value("out.json")) + .arg(arg!(--meta "metadata file").default_value("meta.json")) + // The CLI arguments below this line are ignored at this point + .arg( + Arg::new("proof-at") + .short('p') + .long("proof-at") + .value_name("FREQ") + .default_value("never") + .value_parser(step_frequency_parser), + ) + .arg( + Arg::new("proof-fmt") + .long("proof-fmt") + .value_name("FORMAT") + 
.default_value("proof-%d.json"), + ) + .arg( + Arg::new("snapshot-fmt") + .long("snapshot-fmt") + .value_name("FORMAT") + .default_value("state-%d.json"), + ) + .arg( + Arg::new("stop-at") + .long("stop-at") + .value_name("FREQ") + .default_value("never") + .value_parser(step_frequency_parser), + ) + .arg( + Arg::new("info-at") + .long("info-at") + .value_name("FREQ") + .default_value("never") + .value_parser(step_frequency_parser), + ) + .arg( + Arg::new("pprof-cpu") + .long("pprof-cpu") + .action(ArgAction::SetTrue), + ) + .arg( + arg!(host: [HOST] "host program specification [host program arguments]") + .num_args(1..) + .last(true) + .value_parser(value_parser!(String)), + ); + + let cli = cli.get_matches(); + + let input_state_file = cli.get_one::("input").unwrap(); + + let output_state_file = cli.get_one::("output").unwrap(); + + let metadata_file = cli.get_one::("meta").unwrap(); + + let proof_at = cli.get_one::("proof-at").unwrap(); + let info_at = cli.get_one::("info-at").unwrap(); + let stop_at = cli.get_one::("stop-at").unwrap(); + + let proof_fmt = cli.get_one::("proof-fmt").unwrap(); + let snapshot_fmt = cli.get_one::("snapshot-fmt").unwrap(); + let pprof_cpu = cli.get_one::("pprof-cpu").unwrap(); + + let host_spec = cli + .get_many::("host") + .map(|vals| vals.collect::>()) + .unwrap_or_default(); + + let host = if host_spec.is_empty() { + None + } else { + Some(HostProgram { + name: host_spec[0].to_string(), + arguments: host_spec[1..] 
+ .to_vec() + .iter() + .map(|x| x.to_string()) + .collect(), + }) + }; + + VmConfiguration { + input_state_file: input_state_file.to_string(), + output_state_file: output_state_file.to_string(), + metadata_file: metadata_file.to_string(), + proof_at: proof_at.clone(), + stop_at: stop_at.clone(), + info_at: info_at.clone(), + proof_fmt: proof_fmt.to_string(), + snapshot_fmt: snapshot_fmt.to_string(), + pprof_cpu: *pprof_cpu, + host, + } +} + +pub fn main() -> ExitCode { + let configuration = cli(); + + println!("configuration\n{:#?}", configuration); + + let file = File::open(configuration.input_state_file).expect("Error opening input state file "); + + let reader = BufReader::new(file); + // Read the JSON contents of the file as an instance of `State`. + let state: State = serde_json::from_reader(reader).expect("Error reading input state file"); + + if let Some(host_program) = configuration.host { + println!("Launching host program {}", host_program.name); + + let _child = std::process::Command::new(host_program.name) + .args(host_program.arguments) + .spawn() + .expect("Could not spawn host process"); + }; + + let page_size = 1 << 12; + + let mut env = witness::Env::::create(page_size, state); + + while !env.halt { + env.step(); + } + + // TODO: Logic + ExitCode::FAILURE +} diff --git a/optimism/src/mips/mod.rs b/optimism/src/mips/mod.rs new file mode 100644 index 0000000000..2499222bb4 --- /dev/null +++ b/optimism/src/mips/mod.rs @@ -0,0 +1,2 @@ +pub mod registers; +pub mod witness; diff --git a/optimism/src/mips/registers.rs b/optimism/src/mips/registers.rs new file mode 100644 index 0000000000..89a1ee23b8 --- /dev/null +++ b/optimism/src/mips/registers.rs @@ -0,0 +1,47 @@ +use serde::{Deserialize, Serialize}; +use std::ops::{Index, IndexMut}; + +pub const NUM_REGISTERS: usize = 34; + +#[derive(Clone, Default, Debug, Serialize, Deserialize)] +pub struct Registers { + pub general_purpose: [T; 32], + pub hi: T, + pub lo: T, +} + +impl Registers { + pub fn 
iter(&self) -> impl Iterator { + self.general_purpose.iter().chain([&self.hi, &self.lo]) + } +} + +impl Index for Registers { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + if index < 32 { + &self.general_purpose[index] + } else if index == 32 { + &self.hi + } else if index == 33 { + &self.lo + } else { + panic!("Index out of bounds"); + } + } +} + +impl IndexMut for Registers { + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + if index < 32 { + &mut self.general_purpose[index] + } else if index == 32 { + &mut self.hi + } else if index == 33 { + &mut self.lo + } else { + panic!("Index out of bounds"); + } + } +} diff --git a/optimism/src/mips/witness.rs b/optimism/src/mips/witness.rs new file mode 100644 index 0000000000..dbaa7b30a8 --- /dev/null +++ b/optimism/src/mips/witness.rs @@ -0,0 +1,99 @@ +use crate::{cannon::State, mips::registers::Registers}; +use ark_ff::Field; +use std::array; + +pub const NUM_GLOBAL_LOOKUP_TERMS: usize = 1; +pub const NUM_DECODING_LOOKUP_TERMS: usize = 2; +pub const NUM_INSTRUCTION_LOOKUP_TERMS: usize = 5; +pub const NUM_LOOKUP_TERMS: usize = + NUM_GLOBAL_LOOKUP_TERMS + NUM_DECODING_LOOKUP_TERMS + NUM_INSTRUCTION_LOOKUP_TERMS; +pub const SCRATCH_SIZE: usize = 25; + +#[derive(Clone)] +pub struct SyscallEnv { + pub heap: u32, // Heap pointer (actually unused in Cannon as of [2023-10-18]) + pub preimage_offset: u32, + pub preimage_key: Vec, + pub last_hint: Option>, +} + +impl SyscallEnv { + pub fn create(state: &State) -> Self { + SyscallEnv { + heap: state.heap, + preimage_key: state.preimage_key.as_bytes().to_vec(), // Might not be correct + preimage_offset: state.preimage_offset, + last_hint: state.last_hint.clone(), + } + } +} + +#[derive(Clone)] +pub struct Env { + pub instruction_counter: usize, + pub memory: Vec<(u32, Vec)>, + pub memory_write_index: Vec<(u32, Vec)>, + pub registers: Registers, + pub registers_write_index: Registers, + pub instruction_pointer: u32, + pub 
scratch_state_idx: usize, + pub scratch_state: [Fp; SCRATCH_SIZE], + pub halt: bool, + pub syscall_env: SyscallEnv, +} + +fn fresh_scratch_state() -> [Fp; N] { + array::from_fn(|_| Fp::zero()) +} + +impl Env { + pub fn create(page_size: usize, state: State) -> Self { + let initial_instruction_pointer = state.pc; + + let syscall_env = SyscallEnv::create(&state); + + let mut initial_memory: Vec<(u32, Vec)> = state + .memory + .into_iter() + // Check that the conversion from page data is correct + .map(|page| (page.index, page.data)) + .collect(); + + for (_address, initial_memory) in initial_memory.iter_mut() { + initial_memory.extend((0..(page_size - initial_memory.len())).map(|_| 0u8)); + assert_eq!(initial_memory.len(), page_size); + } + + let memory_offsets = initial_memory + .iter() + .map(|(offset, _)| *offset) + .collect::>(); + + let initial_registers = Registers { + lo: state.lo, + hi: state.hi, + general_purpose: state.registers, + }; + + Env { + instruction_counter: state.step as usize, + memory: initial_memory.clone(), + memory_write_index: memory_offsets + .iter() + .map(|offset| (*offset, vec![0usize; page_size])) + .collect(), + registers: initial_registers.clone(), + registers_write_index: Registers::default(), + instruction_pointer: initial_instruction_pointer, + scratch_state_idx: 0, + scratch_state: fresh_scratch_state(), + halt: state.exited, + syscall_env, + } + } + + pub fn step(&mut self) { + // TODO + self.halt = true; + } +} diff --git a/poly-commitment/Cargo.toml b/poly-commitment/Cargo.toml index 6f40ad5f7d..890555082e 100644 --- a/poly-commitment/Cargo.toml +++ b/poly-commitment/Cargo.toml @@ -37,6 +37,7 @@ ocaml-gen = { version = "0.1.5", optional = true } [dev-dependencies] colored = "2.0.0" rand_chacha = { version = "0.3.0" } +ark-bn254 = { version = "0.3.0" } [features] ocaml_types = [ "ocaml", "ocaml-gen" ] diff --git a/poly-commitment/src/commitment.rs b/poly-commitment/src/commitment.rs index 001e4b021e..3d6cdf2411 100644 --- 
a/poly-commitment/src/commitment.rs +++ b/poly-commitment/src/commitment.rs @@ -7,6 +7,7 @@ //! 3. Verify batch of batched opening proofs use crate::srs::endos; +use crate::SRS as SRSTrait; use crate::{error::CommitmentError, srs::SRS}; use ark_ec::{ models::short_weierstrass_jacobian::GroupAffine as SWJAffine, msm::VariableBaseMSM, @@ -357,7 +358,12 @@ pub trait CommitmentCurve: AffineCurve { fn to_coordinates(&self) -> Option<(Self::BaseField, Self::BaseField)>; fn of_coordinates(x: Self::BaseField, y: Self::BaseField) -> Self; +} +/// A trait extending CommitmentCurve for endomorphisms. +/// Unfortunately, we can't specify that `AffineCurve`, +/// so usage of this traits must manually bind `G::BaseField: PrimeField`. +pub trait EndoCurve: CommitmentCurve { /// Combine where x1 = one fn combine_one(g1: &[Self], g2: &[Self], x2: Self::ScalarField) -> Vec { crate::combine::window_combine(g1, g2, Self::ScalarField::one(), x2) @@ -384,10 +390,7 @@ pub trait CommitmentCurve: AffineCurve { } } -impl CommitmentCurve for SWJAffine

-where - P::BaseField: PrimeField, -{ +impl CommitmentCurve for SWJAffine

{ type Params = P; type Map = BWParameters

; @@ -402,7 +405,12 @@ where fn of_coordinates(x: P::BaseField, y: P::BaseField) -> SWJAffine

{ SWJAffine::

::new(x, y, false) } +} +impl EndoCurve for SWJAffine

+where + P::BaseField: PrimeField, +{ fn combine_one(g1: &[Self], g2: &[Self], x2: Self::ScalarField) -> Vec { crate::combine::affine_window_combine_one(g1, g2, x2) } @@ -500,7 +508,7 @@ where /// Contains the batch evaluation // TODO: I think we should really change this name to something more correct -pub struct BatchEvaluationProof<'a, G, EFqSponge> +pub struct BatchEvaluationProof<'a, G, EFqSponge, OpeningProof> where G: AffineCurve, EFqSponge: FqSponge, @@ -514,23 +522,114 @@ where /// scaling factor for polynomials pub evalscale: G::ScalarField, /// batched opening proof - pub opening: &'a OpeningProof, + pub opening: &'a OpeningProof, pub combined_inner_product: G::ScalarField, } -impl SRS { +pub fn combine_commitments( + evaluations: &[Evaluation], + scalars: &mut Vec, + points: &mut Vec, + polyscale: G::ScalarField, + rand_base: G::ScalarField, +) { + let mut xi_i = G::ScalarField::one(); + + for Evaluation { + commitment, + degree_bound, + .. + } in evaluations + .iter() + .filter(|x| !x.commitment.unshifted.is_empty()) + { + // iterating over the polynomial segments + for comm_ch in &commitment.unshifted { + scalars.push(rand_base * xi_i); + points.push(*comm_ch); + + xi_i *= polyscale; + } + + if let Some(_m) = degree_bound { + if let Some(comm_ch) = commitment.shifted { + if !comm_ch.is_zero() { + // polyscale^i sum_j evalscale^j elm_j^{N - m} f(elm_j) + scalars.push(rand_base * xi_i); + points.push(comm_ch); + + xi_i *= polyscale; + } + } + } + } +} + +pub fn combine_evaluations( + evaluations: &Vec>, + polyscale: G::ScalarField, +) -> Vec { + let mut xi_i = G::ScalarField::one(); + let mut acc = { + let num_evals = if !evaluations.is_empty() { + evaluations[0].evaluations.len() + } else { + 0 + }; + vec![G::ScalarField::zero(); num_evals] + }; + + for Evaluation { + evaluations, + degree_bound, + .. 
+ } in evaluations + .iter() + .filter(|x| !x.commitment.unshifted.is_empty()) + { + // iterating over the polynomial segments + for j in 0..evaluations[0].len() { + for i in 0..evaluations.len() { + acc[i] += evaluations[i][j] * xi_i; + } + xi_i *= polyscale; + } + + if let Some(_m) = degree_bound { + todo!("Misaligned chunked commitments are not supported") + } + } + + acc +} + +impl SRSTrait for SRS { + /// The maximum polynomial degree that can be committed to + fn max_poly_size(&self) -> usize { + self.g.len() + } + + fn get_lagrange_basis(&self, domain_size: usize) -> Option<&Vec>> { + self.lagrange_bases.get(&domain_size) + } + + fn blinding_commitment(&self) -> G { + self.h + } + /// Commits a polynomial, potentially splitting the result in multiple commitments. - pub fn commit( + fn commit( &self, plnm: &DensePolynomial, + num_chunks: usize, max: Option, rng: &mut (impl RngCore + CryptoRng), ) -> BlindedCommitment { - self.mask(self.commit_non_hiding(plnm, max), rng) + self.mask(self.commit_non_hiding(plnm, num_chunks, max), rng) } /// Turns a non-hiding polynomial commitment into a hidding polynomial commitment. Transforms each given `` into `( + wH, w)` with a random `w` per commitment. - pub fn mask( + fn mask( &self, comm: PolyComm, rng: &mut (impl RngCore + CryptoRng), @@ -540,7 +639,7 @@ impl SRS { } /// Same as [SRS::mask] except that you can pass the blinders manually. - pub fn mask_custom( + fn mask_custom( &self, com: PolyComm, blinders: &PolyComm, @@ -561,13 +660,15 @@ impl SRS { /// This function commits a polynomial using the SRS' basis of size `n`. 
/// - `plnm`: polynomial to commit to with max size of sections + /// - `num_chunks`: the number of unshifted commitments to be included in the output polynomial commitment /// - `max`: maximal degree of the polynomial (not inclusive), if none, no degree bound /// The function returns an unbounded commitment vector (which splits the commitment into several commitments of size at most `n`), /// as well as an optional bounded commitment (if `max` is set). /// Note that a maximum degree cannot (and doesn't need to) be enforced via a shift if `max` is a multiple of `n`. - pub fn commit_non_hiding( + fn commit_non_hiding( &self, plnm: &DensePolynomial, + num_chunks: usize, max: Option, ) -> PolyComm { let is_zero = plnm.is_zero(); @@ -588,6 +689,10 @@ impl SRS { }); } + for _ in unshifted.len()..num_chunks { + unshifted.push(G::zero()); + } + // committing only last chunk shifted to the right edge of SRS let shifted = match max { None => None, @@ -613,7 +718,7 @@ impl SRS { PolyComm:: { unshifted, shifted } } - pub fn commit_evaluations_non_hiding( + fn commit_evaluations_non_hiding( &self, domain: D, plnm: &Evaluations>, @@ -638,7 +743,7 @@ impl SRS { } } - pub fn commit_evaluations( + fn commit_evaluations( &self, domain: D, plnm: &Evaluations>, @@ -646,7 +751,9 @@ impl SRS { ) -> BlindedCommitment { self.mask(self.commit_evaluations_non_hiding(domain, plnm), rng) } +} +impl SRS { /// This function verifies batch of batched polynomial commitment opening proofs /// batch: batch of batched polynomial commitment opening proofs /// vector of evaluation points @@ -660,7 +767,7 @@ impl SRS { pub fn verify( &self, group_map: &G::Map, - batch: &mut [BatchEvaluationProof], + batch: &mut [BatchEvaluationProof>], rng: &mut RNG, ) -> bool where @@ -798,38 +905,13 @@ impl SRS { // sum_j evalscale^j (sum_i polyscale^i f_i) (elm_j) // == sum_j sum_i evalscale^j polyscale^i f_i(elm_j) // == sum_i polyscale^i sum_j evalscale^j f_i(elm_j) - { - let mut xi_i = G::ScalarField::one(); - - 
for Evaluation { - commitment, - degree_bound, - .. - } in evaluations - .iter() - .filter(|x| !x.commitment.unshifted.is_empty()) - { - // iterating over the polynomial segments - for comm_ch in &commitment.unshifted { - scalars.push(rand_base_i_c_i * xi_i); - points.push(*comm_ch); - - xi_i *= *polyscale; - } - - if let Some(_m) = degree_bound { - if let Some(comm_ch) = commitment.shifted { - if !comm_ch.is_zero() { - // polyscale^i sum_j evalscale^j elm_j^{N - m} f(elm_j) - scalars.push(rand_base_i_c_i * xi_i); - points.push(comm_ch); - - xi_i *= *polyscale; - } - } - } - } - }; + combine_commitments( + evaluations, + &mut scalars, + &mut points, + *polyscale, + rand_base_i_c_i, + ); scalars.push(rand_base_i_c_i * *combined_inner_product); points.push(u); @@ -879,12 +961,14 @@ mod tests { let mut srs = SRS::::create(n); srs.add_lagrange_basis(domain); + let num_chunks = domain.size() / srs.g.len(); + let expected_lagrange_commitments: Vec<_> = (0..n) .map(|i| { let mut e = vec![Fp::zero(); n]; e[i] = Fp::one(); let p = Evaluations::>::from_vec_and_domain(e, domain).interpolate(); - srs.commit_non_hiding(&p, None) + srs.commit_non_hiding(&p, num_chunks, None) }) .collect(); @@ -905,12 +989,14 @@ mod tests { let mut srs = SRS::::create(n / 2); srs.add_lagrange_basis(domain); + let num_chunks = domain.size() / srs.g.len(); + let expected_lagrange_commitments: Vec<_> = (0..n) .map(|i| { let mut e = vec![Fp::zero(); n]; e[i] = Fp::one(); let p = Evaluations::>::from_vec_and_domain(e, domain).interpolate(); - srs.commit_non_hiding(&p, None) + srs.commit_non_hiding(&p, num_chunks, None) }) .collect(); @@ -931,12 +1017,14 @@ mod tests { let mut srs = SRS::::create(n / 2 + 1); srs.add_lagrange_basis(domain); + let num_chunks = (domain.size() + srs.g.len() - 1) / srs.g.len(); + let expected_lagrange_commitments: Vec<_> = (0..n) .map(|i| { let mut e = vec![Fp::zero(); n]; e[i] = Fp::one(); let p = Evaluations::>::from_vec_and_domain(e, domain).interpolate(); - 
srs.commit_non_hiding(&p, Some(64)) + srs.commit_non_hiding(&p, num_chunks, Some(64)) }) .collect(); @@ -961,9 +1049,9 @@ mod tests { let rng = &mut StdRng::from_seed([0u8; 32]); // commit the two polynomials (and upperbound the second one) - let commitment = srs.commit(&poly1, None, rng); + let commitment = srs.commit(&poly1, 1, None, rng); let upperbound = poly2.degree() + 1; - let bounded_commitment = srs.commit(&poly2, Some(upperbound), rng); + let bounded_commitment = srs.commit(&poly2, 1, Some(upperbound), rng); // create an aggregated opening proof let (u, v) = (Fp::rand(rng), Fp::rand(rng)); @@ -994,10 +1082,10 @@ mod tests { // evaluate the polynomials at these two points let poly1_chunked_evals = vec![ poly1 - .to_chunked_polynomial(srs.g.len()) + .to_chunked_polynomial(1, srs.g.len()) .evaluate_chunks(elm[0]), poly1 - .to_chunked_polynomial(srs.g.len()) + .to_chunked_polynomial(1, srs.g.len()) .evaluate_chunks(elm[1]), ]; @@ -1010,10 +1098,10 @@ mod tests { let poly2_chunked_evals = vec![ poly2 - .to_chunked_polynomial(srs.g.len()) + .to_chunked_polynomial(1, srs.g.len()) .evaluate_chunks(elm[0]), poly2 - .to_chunked_polynomial(srs.g.len()) + .to_chunked_polynomial(1, srs.g.len()) .evaluate_chunks(elm[1]), ]; diff --git a/poly-commitment/src/evaluation_proof.rs b/poly-commitment/src/evaluation_proof.rs index 7c030787cc..0b15615b66 100644 --- a/poly-commitment/src/evaluation_proof.rs +++ b/poly-commitment/src/evaluation_proof.rs @@ -1,11 +1,11 @@ -use crate::srs::SRS; use crate::{commitment::*, srs::endos}; +use crate::{srs::SRS, PolynomialsToCombine, SRS as _}; use ark_ec::{msm::VariableBaseMSM, AffineCurve, ProjectiveCurve}; use ark_ff::{FftField, Field, One, PrimeField, UniformRand, Zero}; use ark_poly::{univariate::DensePolynomial, UVPolynomial}; use ark_poly::{EvaluationDomain, Evaluations}; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; -use o1_utils::math; +use o1_utils::{math, ExtendedDensePolynomial}; use rand_core::{CryptoRng, RngCore}; 
use rayon::prelude::*; use serde::{Deserialize, Serialize}; @@ -71,6 +71,103 @@ impl<'a, F: Field> ScaledChunkedPolynomial { } } +/// Combine the polynomials using `polyscale`, creating a single unified polynomial to open. +pub fn combine_polys>( + plnms: PolynomialsToCombine, // vector of polynomial with optional degree bound and commitment randomness + polyscale: G::ScalarField, // scaling factor for polynoms + srs_length: usize, +) -> (DensePolynomial, G::ScalarField) { + let mut plnm = ScaledChunkedPolynomial::::default(); + let mut plnm_evals_part = { + // For now just check that all the evaluation polynomials are the same degree so that we + // can do just a single FFT. + // Furthermore we check they have size less than the SRS size so we don't have to do chunking. + // If/when we change this, we can add more complicated code to handle different degrees. + let degree = plnms + .iter() + .fold(None, |acc, (p, _, _)| match p { + DensePolynomialOrEvaluations::DensePolynomial(_) => acc, + DensePolynomialOrEvaluations::Evaluations(_, d) => { + if let Some(n) = acc { + assert_eq!(n, d.size()); + } + Some(d.size()) + } + }) + .unwrap_or(0); + vec![G::ScalarField::zero(); degree] + }; + // let mut plnm_chunks: Vec<(G::ScalarField, OptShiftedPolynomial<_>)> = vec![]; + + let mut omega = G::ScalarField::zero(); + let mut scale = G::ScalarField::one(); + + // iterating over polynomials in the batch + for (p_i, degree_bound, omegas) in plnms { + match p_i { + DensePolynomialOrEvaluations::Evaluations(evals_i, sub_domain) => { + let stride = evals_i.evals.len() / sub_domain.size(); + let evals = &evals_i.evals; + plnm_evals_part + .par_iter_mut() + .enumerate() + .for_each(|(i, x)| { + *x += scale * evals[i * stride]; + }); + for j in 0..omegas.unshifted.len() { + omega += &(omegas.unshifted[j] * scale); + scale *= &polyscale; + } + // We assume here that we have no shifted segment. 
+ // TODO: Remove shifted + } + + DensePolynomialOrEvaluations::DensePolynomial(p_i) => { + let mut offset = 0; + // iterating over chunks of the polynomial + if let Some(m) = degree_bound { + assert!(p_i.coeffs.len() <= m + 1); + } else { + assert!(omegas.shifted.is_none()); + } + for j in 0..omegas.unshifted.len() { + let segment = &p_i.coeffs[std::cmp::min(offset, p_i.coeffs.len()) + ..std::cmp::min(offset + srs_length, p_i.coeffs.len())]; + // always mixing in the unshifted segments + plnm.add_unshifted(scale, segment); + + omega += &(omegas.unshifted[j] * scale); + scale *= &polyscale; + offset += srs_length; + if let Some(m) = degree_bound { + if offset >= *m { + if offset > *m { + // mixing in the shifted segment since degree is bounded + plnm.add_shifted(scale, srs_length - m % srs_length, segment); + } + omega += &(omegas.shifted.unwrap() * scale); + scale *= &polyscale; + } + } + } + } + } + } + + let mut plnm = plnm.to_dense_polynomial(); + if !plnm_evals_part.is_empty() { + let n = plnm_evals_part.len(); + let max_poly_size = srs_length; + let num_chunks = n / max_poly_size; + plnm += &Evaluations::from_vec_and_domain(plnm_evals_part, D::new(n).unwrap()) + .interpolate() + .to_chunked_polynomial(num_chunks, max_poly_size) + .linearize(polyscale); + } + + (plnm, omega) +} + impl SRS { /// This function opens polynomial commitments in batch /// plnms: batch of polynomials to open commitments for with, optionally, max degrees @@ -101,6 +198,7 @@ impl SRS { EFqSponge: Clone + FqSponge, RNG: RngCore + CryptoRng, G::BaseField: PrimeField, + G: EndoCurve, { let (endo_q, endo_r) = endos::(); @@ -112,95 +210,7 @@ impl SRS { let mut g = self.g.clone(); g.extend(vec![G::zero(); padding]); - let (p, blinding_factor) = { - let mut plnm = ScaledChunkedPolynomial::::default(); - let mut plnm_evals_part = { - // For now just check that all the evaluation polynomials are the same degree so that we - // can do just a single FFT. 
- // Furthermore we check they have size less than the SRS size so we don't have to do chunking. - // If/when we change this, we can add more complicated code to handle different degrees. - let degree = plnms - .iter() - .fold(None, |acc, (p, _, _)| match p { - DensePolynomialOrEvaluations::DensePolynomial(_) => acc, - DensePolynomialOrEvaluations::Evaluations(_, d) => { - if let Some(n) = acc { - assert_eq!(n, d.size()); - } - Some(d.size()) - } - }) - .unwrap_or(0); - assert!(degree <= padded_length); - vec![G::ScalarField::zero(); degree] - }; - // let mut plnm_chunks: Vec<(G::ScalarField, OptShiftedPolynomial<_>)> = vec![]; - - let mut omega = G::ScalarField::zero(); - let mut scale = G::ScalarField::one(); - - // iterating over polynomials in the batch - for (p_i, degree_bound, omegas) in plnms { - match p_i { - DensePolynomialOrEvaluations::Evaluations(evals_i, sub_domain) => { - let stride = evals_i.evals.len() / sub_domain.size(); - let evals = &evals_i.evals; - plnm_evals_part - .par_iter_mut() - .enumerate() - .for_each(|(i, x)| { - *x += scale * evals[i * stride]; - }); - assert_eq!(omegas.unshifted.len(), 1); - omega += &(omegas.unshifted[0] * scale); - scale *= &polyscale; - } - - DensePolynomialOrEvaluations::DensePolynomial(p_i) => { - let mut offset = 0; - // iterating over chunks of the polynomial - if let Some(m) = degree_bound { - assert!(p_i.coeffs.len() <= m + 1); - } else { - assert!(omegas.shifted.is_none()); - } - for j in 0..omegas.unshifted.len() { - let segment = &p_i.coeffs - [offset..std::cmp::min(offset + self.g.len(), p_i.coeffs.len())]; - // always mixing in the unshifted segments - plnm.add_unshifted(scale, segment); - - omega += &(omegas.unshifted[j] * scale); - scale *= &polyscale; - offset += self.g.len(); - if let Some(m) = degree_bound { - if offset >= *m { - if offset > *m { - // mixing in the shifted segment since degree is bounded - plnm.add_shifted( - scale, - self.g.len() - m % self.g.len(), - segment, - ); - } - omega += 
&(omegas.shifted.unwrap() * scale); - scale *= &polyscale; - } - } - } - } - } - } - - let mut plnm = plnm.to_dense_polynomial(); - if !plnm_evals_part.is_empty() { - let n = plnm_evals_part.len(); - plnm += &Evaluations::from_vec_and_domain(plnm_evals_part, D::new(n).unwrap()) - .interpolate(); - } - - (plnm, omega) - }; + let (p, blinding_factor) = combine_polys::(plnms, polyscale, self.g.len()); let rounds = math::ceil_log2(self.g.len()); @@ -345,10 +355,59 @@ impl SRS { sg: g0, } } + + /// This function is a debugging helper. + #[allow(clippy::too_many_arguments)] + #[allow(clippy::type_complexity)] + #[allow(clippy::many_single_char_names)] + pub fn prover_polynomials_to_verifier_evaluations>( + &self, + plnms: &[( + DensePolynomialOrEvaluations, + Option, + PolyComm, + )], // vector of polynomial with optional degree bound and commitment randomness + elm: &[G::ScalarField], // vector of evaluation points + ) -> Vec> + where + G::BaseField: PrimeField, + { + plnms + .iter() + .enumerate() + .map(|(i, (poly_or_evals, degree_bound, blinders))| { + let poly = match poly_or_evals { + DensePolynomialOrEvaluations::DensePolynomial(poly) => (*poly).clone(), + DensePolynomialOrEvaluations::Evaluations(evals, _) => { + (*evals).clone().interpolate() + } + }; + let chunked_polynomial = + poly.to_chunked_polynomial(blinders.unshifted.len(), self.g.len()); + let chunked_commitment = + { self.commit_non_hiding(&poly, blinders.unshifted.len(), None) }; + let masked_commitment = match self.mask_custom(chunked_commitment, blinders) { + Ok(comm) => comm, + Err(err) => panic!("Error at index {i}: {err}"), + }; + let chunked_evals = elm + .iter() + .map(|elm| chunked_polynomial.evaluate_chunks(*elm)) + .collect(); + Evaluation { + commitment: masked_commitment.commitment, + + evaluations: chunked_evals, + + degree_bound: *degree_bound, + } + }) + .collect() + } } #[serde_as] -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, 
Default)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] pub struct OpeningProof { /// vector of rounds of L & R commitments @@ -364,6 +423,49 @@ pub struct OpeningProof { pub sg: G, } +impl< + BaseField: PrimeField, + G: AffineCurve + CommitmentCurve + EndoCurve, + > crate::OpenProof for OpeningProof +{ + type SRS = SRS; + + fn open::ScalarField>>( + srs: &Self::SRS, + group_map: &::Map, + plnms: &[( + DensePolynomialOrEvaluations<::ScalarField, D>, + Option, + PolyComm<::ScalarField>, + )], // vector of polynomial with optional degree bound and commitment randomness + elm: &[::ScalarField], // vector of evaluation points + polyscale: ::ScalarField, // scaling factor for polynoms + evalscale: ::ScalarField, // scaling factor for evaluation point powers + sponge: EFqSponge, // sponge + rng: &mut RNG, + ) -> Self + where + EFqSponge: + Clone + FqSponge<::BaseField, G, ::ScalarField>, + RNG: RngCore + CryptoRng, + { + srs.open(group_map, plnms, elm, polyscale, evalscale, sponge, rng) + } + + fn verify( + srs: &Self::SRS, + group_map: &G::Map, + batch: &mut [BatchEvaluationProof], + rng: &mut RNG, + ) -> bool + where + EFqSponge: FqSponge, + RNG: RngCore + CryptoRng, + { + srs.verify(group_map, batch, rng) + } +} + pub struct Challenges { pub chal: Vec, pub chal_inv: Vec, diff --git a/poly-commitment/src/lib.rs b/poly-commitment/src/lib.rs index 3b6cd8b3e6..4d7bac7913 100644 --- a/poly-commitment/src/lib.rs +++ b/poly-commitment/src/lib.rs @@ -3,9 +3,121 @@ mod combine; pub mod commitment; pub mod error; pub mod evaluation_proof; +pub mod pairing_proof; pub mod srs; #[cfg(test)] mod tests; pub use commitment::PolyComm; + +use crate::commitment::{BatchEvaluationProof, BlindedCommitment, CommitmentCurve}; +use crate::error::CommitmentError; +use crate::evaluation_proof::DensePolynomialOrEvaluations; +use ark_ec::AffineCurve; +use ark_ff::UniformRand; +use ark_poly::{ + univariate::DensePolynomial, EvaluationDomain, 
Evaluations, Radix2EvaluationDomain as D, +}; +use mina_poseidon::FqSponge; +use rand_core::{CryptoRng, RngCore}; + +pub trait SRS { + /// The maximum polynomial degree that can be committed to + fn max_poly_size(&self) -> usize; + + /// Retrieve the precomputed Lagrange basis for the given domain size + fn get_lagrange_basis(&self, domain_size: usize) -> Option<&Vec>>; + + /// Get the group element used for blinding commitments + fn blinding_commitment(&self) -> G; + + /// Commits a polynomial, potentially splitting the result in multiple commitments. + fn commit( + &self, + plnm: &DensePolynomial, + num_chunks: usize, + max: Option, + rng: &mut (impl RngCore + CryptoRng), + ) -> BlindedCommitment; + + /// Same as [SRS::mask] except that you can pass the blinders manually. + fn mask_custom( + &self, + com: PolyComm, + blinders: &PolyComm, + ) -> Result, CommitmentError>; + + /// Turns a non-hiding polynomial commitment into a hidding polynomial commitment. Transforms each given `` into `( + wH, w)` with a random `w` per commitment. + fn mask( + &self, + comm: PolyComm, + rng: &mut (impl RngCore + CryptoRng), + ) -> BlindedCommitment { + let blinders = comm.map(|_| G::ScalarField::rand(rng)); + self.mask_custom(comm, &blinders).unwrap() + } + + /// This function commits a polynomial using the SRS' basis of size `n`. + /// - `plnm`: polynomial to commit to with max size of sections + /// - `max`: maximal degree of the polynomial (not inclusive), if none, no degree bound + /// The function returns an unbounded commitment vector (which splits the commitment into several commitments of size at most `n`), + /// as well as an optional bounded commitment (if `max` is set). + /// Note that a maximum degree cannot (and doesn't need to) be enforced via a shift if `max` is a multiple of `n`. 
+ fn commit_non_hiding( + &self, + plnm: &DensePolynomial, + num_chunks: usize, + max: Option, + ) -> PolyComm; + + fn commit_evaluations_non_hiding( + &self, + domain: D, + plnm: &Evaluations>, + ) -> PolyComm; + + fn commit_evaluations( + &self, + domain: D, + plnm: &Evaluations>, + rng: &mut (impl RngCore + CryptoRng), + ) -> BlindedCommitment; +} + +#[allow(type_alias_bounds)] +type PolynomialsToCombine<'a, G: CommitmentCurve, D: EvaluationDomain> = &'a [( + DensePolynomialOrEvaluations<'a, G::ScalarField, D>, + Option, + PolyComm, +)]; + +pub trait OpenProof: Sized { + type SRS: SRS; + + #[allow(clippy::too_many_arguments)] + fn open::ScalarField>>( + srs: &Self::SRS, + group_map: &::Map, + plnms: PolynomialsToCombine, // vector of polynomial with optional degree bound and commitment randomness + elm: &[::ScalarField], // vector of evaluation points + polyscale: ::ScalarField, // scaling factor for polynoms + evalscale: ::ScalarField, // scaling factor for evaluation point powers + sponge: EFqSponge, // sponge + rng: &mut RNG, + ) -> Self + where + EFqSponge: + Clone + FqSponge<::BaseField, G, ::ScalarField>, + RNG: RngCore + CryptoRng; + + fn verify( + srs: &Self::SRS, + group_map: &G::Map, + batch: &mut [BatchEvaluationProof], + rng: &mut RNG, + ) -> bool + where + EFqSponge: FqSponge, + RNG: RngCore + CryptoRng; +} diff --git a/poly-commitment/src/pairing_proof.rs b/poly-commitment/src/pairing_proof.rs new file mode 100644 index 0000000000..913cf15d0f --- /dev/null +++ b/poly-commitment/src/pairing_proof.rs @@ -0,0 +1,430 @@ +use crate::commitment::*; +use crate::evaluation_proof::{combine_polys, DensePolynomialOrEvaluations}; +use crate::srs::SRS; +use crate::{CommitmentError, PolynomialsToCombine, SRS as SRSTrait}; +use ark_ec::{msm::VariableBaseMSM, AffineCurve, PairingEngine}; +use ark_ff::{PrimeField, Zero}; +use ark_poly::{ + univariate::{DenseOrSparsePolynomial, DensePolynomial}, + EvaluationDomain, Evaluations, Polynomial, Radix2EvaluationDomain as 
D, UVPolynomial, +}; +use mina_poseidon::FqSponge; +use rand_core::{CryptoRng, RngCore}; +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; + +#[serde_as] +#[derive(Debug, Serialize, Deserialize)] +#[serde( + bound = "Pair::G1Affine: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize" +)] +pub struct PairingProof { + #[serde_as(as = "o1_utils::serialization::SerdeAs")] + pub quotient: Pair::G1Affine, + #[serde_as(as = "o1_utils::serialization::SerdeAs")] + pub blinding: ::ScalarField, +} + +impl Default for PairingProof { + fn default() -> Self { + Self { + quotient: Pair::G1Affine::prime_subgroup_generator(), + blinding: ::ScalarField::zero(), + } + } +} + +impl Clone for PairingProof { + fn clone(&self) -> Self { + Self { + quotient: self.quotient, + blinding: self.blinding, + } + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct PairingSRS { + pub full_srs: SRS, + pub verifier_srs: SRS, +} + +impl Default for PairingSRS { + fn default() -> Self { + Self { + full_srs: SRS::default(), + verifier_srs: SRS::default(), + } + } +} + +impl Clone for PairingSRS { + fn clone(&self) -> Self { + Self { + full_srs: self.full_srs.clone(), + verifier_srs: self.verifier_srs.clone(), + } + } +} + +impl< + F: PrimeField, + G: CommitmentCurve, + G2: CommitmentCurve, + Pair: PairingEngine, + > PairingSRS +{ + pub fn create(x: F, n: usize) -> Self { + PairingSRS { + full_srs: SRS::create_trusted_setup(x, n), + verifier_srs: SRS::create_trusted_setup(x, 3), + } + } +} + +impl< + F: PrimeField, + G: CommitmentCurve, + G2: CommitmentCurve, + Pair: PairingEngine, + > crate::OpenProof for PairingProof +{ + type SRS = PairingSRS; + + fn open::ScalarField>>( + srs: &Self::SRS, + _group_map: &::Map, + plnms: &[( + DensePolynomialOrEvaluations<::ScalarField, D>, + Option, + PolyComm<::ScalarField>, + )], // vector of polynomial with optional degree bound and commitment randomness + elm: &[::ScalarField], // vector of evaluation points + 
polyscale: ::ScalarField, // scaling factor for polynoms + _evalscale: ::ScalarField, // scaling factor for evaluation point powers + _sponge: EFqSponge, // sponge + _rng: &mut RNG, + ) -> Self + where + EFqSponge: + Clone + FqSponge<::BaseField, G, ::ScalarField>, + RNG: RngCore + CryptoRng, + { + PairingProof::create(srs, plnms, elm, polyscale).unwrap() + } + + fn verify( + srs: &Self::SRS, + _group_map: &G::Map, + batch: &mut [BatchEvaluationProof], + _rng: &mut RNG, + ) -> bool + where + EFqSponge: FqSponge, + RNG: RngCore + CryptoRng, + { + for BatchEvaluationProof { + sponge: _, + evaluations, + evaluation_points, + polyscale, + evalscale: _, + opening, + combined_inner_product: _, + } in batch.iter() + { + if !opening.verify(srs, evaluations, *polyscale, evaluation_points) { + return false; + } + } + true + } +} + +impl< + F: PrimeField, + G: CommitmentCurve, + G2: CommitmentCurve, + Pair: PairingEngine, + > SRSTrait for PairingSRS +{ + fn max_poly_size(&self) -> usize { + self.full_srs.max_poly_size() + } + + fn get_lagrange_basis(&self, domain_size: usize) -> Option<&Vec>> { + self.full_srs.get_lagrange_basis(domain_size) + } + + fn blinding_commitment(&self) -> G { + self.full_srs.blinding_commitment() + } + + fn commit( + &self, + plnm: &DensePolynomial, + num_chunks: usize, + max: Option, + rng: &mut (impl RngCore + CryptoRng), + ) -> BlindedCommitment { + self.full_srs.commit(plnm, num_chunks, max, rng) + } + + fn mask_custom( + &self, + com: PolyComm, + blinders: &PolyComm, + ) -> Result, CommitmentError> { + self.full_srs.mask_custom(com, blinders) + } + + fn mask( + &self, + comm: PolyComm, + rng: &mut (impl RngCore + CryptoRng), + ) -> BlindedCommitment { + self.full_srs.mask(comm, rng) + } + + fn commit_non_hiding( + &self, + plnm: &DensePolynomial, + num_chunks: usize, + max: Option, + ) -> PolyComm { + self.full_srs.commit_non_hiding(plnm, num_chunks, max) + } + + fn commit_evaluations_non_hiding( + &self, + domain: D, + plnm: &Evaluations>, + ) 
-> PolyComm { + self.full_srs.commit_evaluations_non_hiding(domain, plnm) + } + + fn commit_evaluations( + &self, + domain: D, + plnm: &Evaluations>, + rng: &mut (impl RngCore + CryptoRng), + ) -> BlindedCommitment { + self.full_srs.commit_evaluations(domain, plnm, rng) + } +} + +/// The polynomial that evaluates to each of `evals` for the respective `elm`s. +fn eval_polynomial(elm: &[F], evals: &[F]) -> DensePolynomial { + assert_eq!(elm.len(), evals.len()); + let (zeta, zeta_omega) = if elm.len() == 2 { + (elm[0], elm[1]) + } else { + todo!() + }; + let (eval_zeta, eval_zeta_omega) = if evals.len() == 2 { + (evals[0], evals[1]) + } else { + todo!() + }; + + // The polynomial that evaluates to `p(zeta)` at `zeta` and `p(zeta_omega)` at + // `zeta_omega`. + // We write `p(x) = a + bx`, which gives + // ```text + // p(zeta) = a + b * zeta + // p(zeta_omega) = a + b * zeta_omega + // ``` + // and so + // ```text + // b = (p(zeta_omega) - p(zeta)) / (zeta_omega - zeta) + // a = p(zeta) - b * zeta + // ``` + let b = (eval_zeta_omega - eval_zeta) / (zeta_omega - zeta); + let a = eval_zeta - b * zeta; + DensePolynomial::from_coefficients_slice(&[a, b]) +} + +/// The polynomial that evaluates to `0` at the evaluation points. 
+fn divisor_polynomial(elm: &[F]) -> DensePolynomial { + elm.iter() + .map(|value| DensePolynomial::from_coefficients_slice(&[-(*value), F::one()])) + .reduce(|poly1, poly2| &poly1 * &poly2) + .unwrap() +} + +impl< + F: PrimeField, + G: CommitmentCurve, + G2: CommitmentCurve, + Pair: PairingEngine, + > PairingProof +{ + pub fn create>( + srs: &PairingSRS, + plnms: PolynomialsToCombine, // vector of polynomial with optional degree bound and commitment randomness + elm: &[G::ScalarField], // vector of evaluation points + polyscale: G::ScalarField, // scaling factor for polynoms + ) -> Option { + let (p, blinding_factor) = combine_polys::(plnms, polyscale, srs.full_srs.g.len()); + let evals: Vec<_> = elm.iter().map(|pt| p.evaluate(pt)).collect(); + + let quotient_poly = { + let eval_polynomial = eval_polynomial(elm, &evals); + let divisor_polynomial = divisor_polynomial(elm); + let numerator_polynomial = &p - &eval_polynomial; + let (quotient, remainder) = DenseOrSparsePolynomial::divide_with_q_and_r( + &numerator_polynomial.into(), + &divisor_polynomial.into(), + )?; + if !remainder.is_zero() { + return None; + } + quotient + }; + + let quotient = srs + .full_srs + .commit_non_hiding("ient_poly, 1, None) + .unshifted[0]; + + Some(PairingProof { + quotient, + blinding: blinding_factor, + }) + } + pub fn verify( + &self, + srs: &PairingSRS, // SRS + evaluations: &Vec>, // commitments to the polynomials + polyscale: G::ScalarField, // scaling factor for polynoms + elm: &[G::ScalarField], // vector of evaluation points + ) -> bool { + let poly_commitment = { + let mut scalars: Vec = Vec::new(); + let mut points = Vec::new(); + combine_commitments( + evaluations, + &mut scalars, + &mut points, + polyscale, + F::one(), /* TODO: This is inefficient */ + ); + let scalars: Vec<_> = scalars.iter().map(|x| x.into_repr()).collect(); + + VariableBaseMSM::multi_scalar_mul(&points, &scalars) + }; + let evals = combine_evaluations(evaluations, polyscale); + let blinding_commitment = 
srs.full_srs.h.mul(self.blinding); + let divisor_commitment = srs + .verifier_srs + .commit_non_hiding(&divisor_polynomial(elm), 1, None) + .unshifted[0]; + let eval_commitment = srs + .full_srs + .commit_non_hiding(&eval_polynomial(elm, &evals), 1, None) + .unshifted[0] + .into_projective(); + let numerator_commitment = { poly_commitment - eval_commitment - blinding_commitment }; + + let numerator = Pair::pairing( + numerator_commitment, + Pair::G2Affine::prime_subgroup_generator(), + ); + let scaled_quotient = Pair::pairing(self.quotient, divisor_commitment); + numerator == scaled_quotient + } +} + +#[cfg(test)] +mod tests { + use super::{PairingProof, PairingSRS}; + use crate::commitment::Evaluation; + use crate::evaluation_proof::DensePolynomialOrEvaluations; + use crate::srs::SRS; + use crate::SRS as _; + use ark_bn254::Fr as ScalarField; + use ark_bn254::{G1Affine as G1, G2Affine as G2, Parameters}; + use ark_ec::bn::Bn; + use ark_ff::UniformRand; + use ark_poly::{ + univariate::DensePolynomial, EvaluationDomain, Polynomial, Radix2EvaluationDomain as D, + UVPolynomial, + }; + + use rand::{rngs::StdRng, SeedableRng}; + + #[test] + fn test_pairing_proof() { + let n = 64; + let domain = D::::new(n).unwrap(); + + let rng = &mut StdRng::from_seed([0u8; 32]); + + let x = ScalarField::rand(rng); + + let mut srs = SRS::::create_trusted_setup(x, n); + let verifier_srs = SRS::::create_trusted_setup(x, 3); + srs.add_lagrange_basis(domain); + + let srs = PairingSRS { + full_srs: srs, + verifier_srs, + }; + + let polynomials: Vec<_> = (0..4) + .map(|_| { + let coeffs = (0..63).map(|_| ScalarField::rand(rng)).collect(); + DensePolynomial::from_coefficients_vec(coeffs) + }) + .collect(); + + let comms: Vec<_> = polynomials + .iter() + .map(|p| srs.full_srs.commit(p, 1, None, rng)) + .collect(); + + let polynomials_and_blinders: Vec<(DensePolynomialOrEvaluations<_, D<_>>, _, _)> = + polynomials + .iter() + .zip(comms.iter()) + .map(|(p, comm)| { + let p = 
DensePolynomialOrEvaluations::DensePolynomial(p); + (p, None, comm.blinders.clone()) + }) + .collect(); + + let evaluation_points = vec![ScalarField::rand(rng), ScalarField::rand(rng)]; + + let evaluations: Vec<_> = polynomials + .iter() + .zip(comms) + .map(|(p, commitment)| { + let evaluations = evaluation_points + .iter() + .map(|x| { + // Inputs are chosen to use only 1 chunk + vec![p.evaluate(x)] + }) + .collect(); + Evaluation { + commitment: commitment.commitment, + evaluations, + degree_bound: None, + } + }) + .collect(); + + let polyscale = ScalarField::rand(rng); + + let pairing_proof = PairingProof::>::create( + &srs, + polynomials_and_blinders.as_slice(), + &evaluation_points, + polyscale, + ) + .unwrap(); + + let res = pairing_proof.verify(&srs, &evaluations, polyscale, &evaluation_points); + assert!(res); + } +} diff --git a/poly-commitment/src/srs.rs b/poly-commitment/src/srs.rs index f4ae235871..c4cd29251b 100644 --- a/poly-commitment/src/srs.rs +++ b/poly-commitment/src/srs.rs @@ -3,8 +3,9 @@ use crate::commitment::CommitmentCurve; use crate::PolyComm; use ark_ec::{AffineCurve, ProjectiveCurve}; -use ark_ff::{BigInteger, PrimeField, Zero}; +use ark_ff::{BigInteger, Field, One, PrimeField, Zero}; use ark_poly::{EvaluationDomain, Radix2EvaluationDomain as D}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use blake2::{Blake2b512, Digest}; use groupmap::GroupMap; use serde::{Deserialize, Serialize}; @@ -15,7 +16,8 @@ use std::collections::HashMap; #[serde_as] #[derive(Debug, Clone, Default, Serialize, Deserialize, Eq)] -pub struct SRS { +#[serde(bound = "G: CanonicalDeserialize + CanonicalSerialize")] +pub struct SRS { /// The vector of group elements for committing to polynomials in coefficient form #[serde_as(as = "Vec")] pub g: Vec, @@ -31,7 +33,7 @@ pub struct SRS { impl PartialEq for SRS where - G: CommitmentCurve, + G: PartialEq, { fn eq(&self, other: &Self) -> bool { self.g == other.g && self.h == other.h @@ -59,27 +61,35 @@ 
where fn point_of_random_bytes(map: &G::Map, random_bytes: &[u8]) -> G where - G::BaseField: PrimeField, + G::BaseField: Field, { // packing in bit-representation const N: usize = 31; - let mut bits = [false; 8 * N]; - for i in 0..N { - for j in 0..8 { - bits[8 * i + j] = (random_bytes[i] >> j) & 1 == 1; + let extension_degree = G::BaseField::extension_degree() as usize; + + let mut base_fields = Vec::with_capacity(N * extension_degree); + + for base_count in 0..extension_degree { + let mut bits = [false; 8 * N]; + let offset = base_count * N; + for i in 0..N { + for j in 0..8 { + bits[8 * i + j] = (random_bytes[offset + i] >> j) & 1 == 1; + } } - } - let n = ::BigInt::from_bits_be(&bits); - let t = G::BaseField::from_repr(n).expect("packing code has a bug"); + let n = + <::BasePrimeField as PrimeField>::BigInt::from_bits_be(&bits); + let t = <::BasePrimeField as PrimeField>::from_repr(n) + .expect("packing code has a bug"); + base_fields.push(t) + } + let t = G::BaseField::from_base_prime_field_elems(&base_fields).unwrap(); let (x, y) = map.to_group(t); G::of_coordinates(x, y) } -impl SRS -where - G::BaseField: PrimeField, -{ +impl SRS { pub fn max_degree(&self) -> usize { self.g.len() } @@ -219,6 +229,39 @@ where self.lagrange_bases.insert(n, chunked_commitments); } + /// This function creates a trusted-setup SRS instance for circuits with number of rows up to `depth`. 
+ pub fn create_trusted_setup(x: G::ScalarField, depth: usize) -> Self { + let m = G::Map::setup(); + + let mut x_pow = G::ScalarField::one(); + let g: Vec<_> = (0..depth) + .map(|_| { + let res = G::prime_subgroup_generator().mul(x_pow); + x_pow *= x; + res.into_affine() + }) + .collect(); + + const MISC: usize = 1; + let [h]: [G; MISC] = array::from_fn(|i| { + let mut h = Blake2b512::new(); + h.update("srs_misc".as_bytes()); + h.update((i as u32).to_be_bytes()); + point_of_random_bytes(&m, &h.finalize()) + }); + + SRS { + g, + h, + lagrange_bases: HashMap::new(), + } + } +} + +impl SRS +where + G::BaseField: PrimeField, +{ /// This function creates SRS instance for circuits with number of rows up to `depth`. pub fn create(depth: usize) -> Self { let m = G::Map::setup(); diff --git a/poly-commitment/src/tests/batch_15_wires.rs b/poly-commitment/src/tests/batch_15_wires.rs index 5d35aed6b3..570e8e8752 100644 --- a/poly-commitment/src/tests/batch_15_wires.rs +++ b/poly-commitment/src/tests/batch_15_wires.rs @@ -5,6 +5,7 @@ use crate::{ commitment::{combined_inner_product, BatchEvaluationProof, CommitmentCurve, Evaluation}, evaluation_proof::DensePolynomialOrEvaluations, srs::SRS, + SRS as _, }; use ark_ff::{UniformRand, Zero}; use ark_poly::{univariate::DensePolynomial, Radix2EvaluationDomain, UVPolynomial}; @@ -29,6 +30,8 @@ where let size = 1 << 7; let srs = SRS::::create(size); + let num_chunks = 1; + let group_map = ::Map::setup(); let sponge = DefaultFqSponge::::new( @@ -79,9 +82,9 @@ where let comm = (0..a.len()) .map(|i| { ( - srs.commit(&a[i].clone(), bounds[i], rng), + srs.commit(&a[i].clone(), num_chunks, bounds[i], rng), x.iter() - .map(|xx| a[i].to_chunked_polynomial(size).evaluate_chunks(*xx)) + .map(|xx| a[i].to_chunked_polynomial(1, size).evaluate_chunks(*xx)) .collect::>(), bounds[i], ) diff --git a/poly-commitment/src/tests/commitment.rs b/poly-commitment/src/tests/commitment.rs index b32101daf2..dedcd0ad6e 100644 --- 
a/poly-commitment/src/tests/commitment.rs +++ b/poly-commitment/src/tests/commitment.rs @@ -5,6 +5,7 @@ use crate::{ }, evaluation_proof::{DensePolynomialOrEvaluations, OpeningProof}, srs::SRS, + SRS as _, }; use ark_ff::{UniformRand, Zero}; use ark_poly::{univariate::DensePolynomial, Radix2EvaluationDomain, UVPolynomial}; @@ -76,7 +77,8 @@ impl AggregatedEvaluationProof { pub fn verify_type( &self, srs: &SRS, - ) -> BatchEvaluationProof> { + ) -> BatchEvaluationProof, OpeningProof> + { let mut coms = vec![]; for eval_com in &self.eval_commitments { assert_eq!(self.eval_points.len(), eval_com.chunked_evals.len()); @@ -139,6 +141,8 @@ fn test_randomised(mut rng: &mut RNG) { // create an SRS optimized for polynomials of degree 2^7 - 1 let srs = SRS::::create(1 << 7); + let num_chunks = 1; + // TODO: move to bench let mut time_commit = Duration::new(0, 0); let mut time_open = Duration::new(0, 0); @@ -173,13 +177,13 @@ fn test_randomised(mut rng: &mut RNG) { let BlindedCommitment { commitment: chunked_commitment, blinders: chunked_blinding, - } = srs.commit(&poly, bound, &mut rng); + } = srs.commit(&poly, num_chunks, bound, &mut rng); time_commit += timer.elapsed(); let mut chunked_evals = vec![]; for point in eval_points.clone() { chunked_evals.push( - poly.to_chunked_polynomial(srs.g.len()) + poly.to_chunked_polynomial(1, srs.g.len()) .evaluate_chunks(point), ); } diff --git a/poseidon/Cargo.toml b/poseidon/Cargo.toml index 10b44ec4d4..7a122051e4 100644 --- a/poseidon/Cargo.toml +++ b/poseidon/Cargo.toml @@ -37,4 +37,3 @@ ark-serialize = "0.3.0" [features] default = [] ocaml_types = [ "ocaml", "ocaml-gen", ] -debug_sponge = [] diff --git a/poseidon/src/dummy_values.rs b/poseidon/src/dummy_values.rs new file mode 100644 index 0000000000..9bfbc82335 --- /dev/null +++ b/poseidon/src/dummy_values.rs @@ -0,0 +1,831 @@ +use crate::poseidon::ArithmeticSpongeParams; +use ark_ff::Field; +use std::{fmt::Debug, str::FromStr}; + +/// Placeholder dummy value for the kimchi 
configuration, suitable for fields of bitlength 254 and +/// above. +/// These parameters are duplicated from the Vesta parameters, generated with +/// ```text +/// ./pasta/params.sage --rounds 55 rust 3 kimchi +/// ``` +pub fn kimchi_dummy>() -> ArithmeticSpongeParams { + ArithmeticSpongeParams { + mds: vec![ + vec![ + Fp::from_str( + "12035446894107573964500871153637039653510326950134440362813193268448863222019", + ) + .unwrap(), + Fp::from_str( + "25461374787957152039031444204194007219326765802730624564074257060397341542093", + ) + .unwrap(), + Fp::from_str( + "27667907157110496066452777015908813333407980290333709698851344970789663080149", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "4491931056866994439025447213644536587424785196363427220456343191847333476930", + ) + .unwrap(), + Fp::from_str( + "14743631939509747387607291926699970421064627808101543132147270746750887019919", + ) + .unwrap(), + Fp::from_str( + "9448400033389617131295304336481030167723486090288313334230651810071857784477", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "10525578725509990281643336361904863911009900817790387635342941550657754064843", + ) + .unwrap(), + Fp::from_str( + "27437632000253211280915908546961303399777448677029255413769125486614773776695", + ) + .unwrap(), + Fp::from_str( + "27566319851776897085443681456689352477426926500749993803132851225169606086988", + ) + .unwrap(), + ], + ], + + round_constants: vec![ + vec![ + Fp::from_str( + "21155079691556475130150866428468322463125560312786319980770950159250751855431", + ) + .unwrap(), + Fp::from_str( + "16883442198399350202652499677723930673110172289234921799701652810789093522349", + ) + .unwrap(), + Fp::from_str( + "17030687036425314703519085065002231920937594822150793091243263847382891822670", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "25216718237129482752721276445368692059997901880654047883630276346421457427360", + ) + .unwrap(), + Fp::from_str( + 
"9054264347380455706540423067244764093107767235485930776517975315876127782582", + ) + .unwrap(), + Fp::from_str( + "26439087121446593160953570192891907825526260324480347638727375735543609856888", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "15251000790817261169639394496851831733819930596125214313084182526610855787494", + ) + .unwrap(), + Fp::from_str( + "10861916012597714684433535077722887124099023163589869801449218212493070551767", + ) + .unwrap(), + Fp::from_str( + "18597653523270601187312528478986388028263730767495975370566527202946430104139", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "15831416454198644276563319006805490049460322229057756462580029181847589006611", + ) + .unwrap(), + Fp::from_str( + "15171856919255965617705854914448645702014039524159471542852132430360867202292", + ) + .unwrap(), + Fp::from_str( + "15488495958879593647482715143904752785889816789652405888927117106448507625751", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "19039802679983063488134304670998725949842655199289961967801223969839823940152", + ) + .unwrap(), + Fp::from_str( + "4720101937153217036737330058775388037616286510783561045464678919473230044408", + ) + .unwrap(), + Fp::from_str( + "10226318327254973427513859412126640040910264416718766418164893837597674300190", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "20878756131129218406920515859235137275859844638301967889441262030146031838819", + ) + .unwrap(), + Fp::from_str( + "7178475685651744631172532830973371642652029385893667810726019303466125436953", + ) + .unwrap(), + Fp::from_str( + "1996970955918516145107673266490486752153434673064635795711751450164177339618", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "15205545916434157464929420145756897321482314798910153575340430817222504672630", + ) + .unwrap(), + Fp::from_str( + "25660296961552699573824264215804279051322332899472350724416657386062327210698", + ) + .unwrap(), + Fp::from_str( + "13842611741937412200312851417353455040950878279339067816479233688850376089318", + 
) + .unwrap(), + ], + vec![ + Fp::from_str( + "1383799642177300432144836486981606294838630135265094078921115713566691160459", + ) + .unwrap(), + Fp::from_str( + "1135532281155277588005319334542025976079676424839948500020664227027300010929", + ) + .unwrap(), + Fp::from_str( + "4384117336930380014868572224801371377488688194169758696438185377724744869360", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "21725577575710270071808882335900370909424604447083353471892004026180492193649", + ) + .unwrap(), + Fp::from_str( + "676128913284806802699862508051022306366147359505124346651466289788974059668", + ) + .unwrap(), + Fp::from_str( + "25186611339598418732666781049829183886812651492845008333418424746493100589207", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "10402240124664763733060094237696964473609580414190944671778761753887884341073", + ) + .unwrap(), + Fp::from_str( + "11918307118590866200687906627767559273324023585642003803337447146531313172441", + ) + .unwrap(), + Fp::from_str( + "16895677254395661024186292503536662354181715337630376909778003268311296637301", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "23818602699032741669874498456696325705498383130221297580399035778119213224810", + ) + .unwrap(), + Fp::from_str( + "4285193711150023248690088154344086684336247475445482883105661485741762600154", + ) + .unwrap(), + Fp::from_str( + "19133204443389422404056150665863951250222934590192266371578950735825153238612", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "5515589673266504033533906836494002702866463791762187140099560583198974233395", + ) + .unwrap(), + Fp::from_str( + "11830435563729472715615302060564876527985621376031612798386367965451821182352", + ) + .unwrap(), + Fp::from_str( + "7510711479224915247011074129666445216001563200717943545636462819681638560128", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "24694843201907722940091503626731830056550128225297370217610328578733387733444", + ) + .unwrap(), + Fp::from_str( + 
"27361655066973784653563425664091383058914302579694897188019422193564924110528", + ) + .unwrap(), + Fp::from_str( + "21606788186194534241166833954371013788633495786419718955480491478044413102713", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "19934060063390905409309407607814787335159021816537006003398035237707924006757", + ) + .unwrap(), + Fp::from_str( + "8495813630060004961768092461554180468161254914257386012937942498774724649553", + ) + .unwrap(), + Fp::from_str( + "27524960680529762202005330464726908693944660961000958842417927307941561848461", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "15178481650950399259757805400615635703086255035073919114667254549690862896985", + ) + .unwrap(), + Fp::from_str( + "16164780354695672259791105197274509251141405713012804937107314962551600380870", + ) + .unwrap(), + Fp::from_str( + "10529167793600778056702353412758954281652843049850979705476598375597148191979", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "721141070179074082553302896292167103755384741083338957818644728290501449040", + ) + .unwrap(), + Fp::from_str( + "22044408985956234023934090378372374883099115753118261312473550998188148912041", + ) + .unwrap(), + Fp::from_str( + "27068254103241989852888872162525066148367014691482601147536314217249046186315", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "3880429241956357176819112098792744584376727450211873998699580893624868748961", + ) + .unwrap(), + Fp::from_str( + "17387097125522937623262508065966749501583017524609697127088211568136333655623", + ) + .unwrap(), + Fp::from_str( + "6256814421247770895467770393029354017922744712896100913895513234184920631289", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "2942627347777337187690939671601251987500285937340386328746818861972711408579", + ) + .unwrap(), + Fp::from_str( + "24031654937764287280548628128490074801809101323243546313826173430897408945397", + ) + .unwrap(), + Fp::from_str( + "14401457902976567713827506689641442844921449636054278900045849050301331732143", + ) 
+ .unwrap(), + ], + vec![ + Fp::from_str( + "20170632877385406450742199836933900257692624353889848352407590794211839130727", + ) + .unwrap(), + Fp::from_str( + "24056496193857444725324410428861722338174099794084586764867109123681727290181", + ) + .unwrap(), + Fp::from_str( + "11257913009612703357266904349759250619633397075667824800196659858304604714965", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "22228158921984425749199071461510152694025757871561406897041788037116931009246", + ) + .unwrap(), + Fp::from_str( + "9152163378317846541430311327336774331416267016980485920222768197583559318682", + ) + .unwrap(), + Fp::from_str( + "13906695403538884432896105059360907560653506400343268230130536740148070289175", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "7220714562509721437034241786731185291972496952091254931195414855962344025067", + ) + .unwrap(), + Fp::from_str( + "27608867305903811397208862801981345878179337369367554478205559689592889691927", + ) + .unwrap(), + Fp::from_str( + "13288465747219756218882697408422850918209170830515545272152965967042670763153", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "8251343892709140154567051772980662609566359215743613773155065627504813327653", + ) + .unwrap(), + Fp::from_str( + "22035238365102171608166944627493632660244312563934708756134297161332908879090", + ) + .unwrap(), + Fp::from_str( + "13560937766273321037807329177749403409731524715067067740487246745322577571823", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "21652518608959234550262559135285358020552897349934571164032339186996805408040", + ) + .unwrap(), + Fp::from_str( + "22479086963324173427634460342145551255011746993910136574926173581069603086891", + ) + .unwrap(), + Fp::from_str( + "13676501958531751140966255121288182631772843001727158043704693838707387130095", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "5680310394102577950568930199056707827608275306479994663197187031893244826674", + ) + .unwrap(), + Fp::from_str( + 
"25125360450906166639190392763071557410047335755341060350879819485506243289998", + ) + .unwrap(), + Fp::from_str( + "22659254028501616785029594492374243581602744364859762239504348429834224676676", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "23101411405087512171421838856759448177512679869882987631073569441496722536782", + ) + .unwrap(), + Fp::from_str( + "24149774013240355952057123660656464942409328637280437515964899830988178868108", + ) + .unwrap(), + Fp::from_str( + "5782097512368226173095183217893826020351125522160843964147125728530147423065", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "13540762114500083869920564649399977644344247485313990448129838910231204868111", + ) + .unwrap(), + Fp::from_str( + "20421637734328811337527547703833013277831804985438407401987624070721139913982", + ) + .unwrap(), + Fp::from_str( + "7742664118615900772129122541139124149525273579639574972380600206383923500701", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "1109643801053963021778418773196543643970146666329661268825691230294798976318", + ) + .unwrap(), + Fp::from_str( + "16580663920817053843121063692728699890952505074386761779275436996241901223840", + ) + .unwrap(), + Fp::from_str( + "14638514680222429058240285918830106208025229459346033470787111294847121792366", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "17080385857812672649489217965285727739557573467014392822992021264701563205891", + ) + .unwrap(), + Fp::from_str( + "26176268111736737558502775993925696791974738793095023824029827577569530708665", + ) + .unwrap(), + Fp::from_str( + "4382756253392449071896813428140986330161215829425086284611219278674857536001", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "13934033814940585315406666445960471293638427404971553891617533231178815348902", + ) + .unwrap(), + Fp::from_str( + "27054912732979753314774418228399230433963143177662848084045249524271046173121", + ) + .unwrap(), + Fp::from_str( + "28916070403698593376490976676534962592542013020010643734621202484860041243391", + 
) + .unwrap(), + ], + vec![ + Fp::from_str( + "24820015636966360150164458094894587765384135259446295278101998130934963922381", + ) + .unwrap(), + Fp::from_str( + "7969535238488580655870884015145760954416088335296905520306227531221721881868", + ) + .unwrap(), + Fp::from_str( + "7690547696740080985104189563436871930607055124031711216224219523236060212249", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "9712576468091272384496248353414290908377825697488757134833205246106605867289", + ) + .unwrap(), + Fp::from_str( + "12148698031438398980683630141370402088785182722473169207262735228500190477924", + ) + .unwrap(), + Fp::from_str( + "14359657643133476969781351728574842164124292705609900285041476162075031948227", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "23563839965372067275137992801035780013422228997724286060975035719045352435470", + ) + .unwrap(), + Fp::from_str( + "4184634822776323233231956802962638484057536837393405750680645555481330909086", + ) + .unwrap(), + Fp::from_str( + "16249511905185772125762038789038193114431085603985079639889795722501216492487", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "11001863048692031559800673473526311616702863826063550559568315794438941516621", + ) + .unwrap(), + Fp::from_str( + "4702354107983530219070178410740869035350641284373933887080161024348425080464", + ) + .unwrap(), + Fp::from_str( + "23751680507533064238793742311430343910720206725883441625894258483004979501613", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "28670526516158451470169873496541739545860177757793329093045522432279094518766", + ) + .unwrap(), + Fp::from_str( + "3568312993091537758218792253361873752799472566055209125947589819564395417072", + ) + .unwrap(), + Fp::from_str( + "1819755756343439646550062754332039103654718693246396323207323333948654200950", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "5372129954699791301953948907349887257752247843844511069896766784624930478273", + ) + .unwrap(), + Fp::from_str( + 
"17512156688034945920605615850550150476471921176481039715733979181538491476080", + ) + .unwrap(), + Fp::from_str( + "25777105342317622165159064911913148785971147228777677435200128966844208883059", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "25350392006158741749134238306326265756085455157012701586003300872637887157982", + ) + .unwrap(), + Fp::from_str( + "20096724945283767296886159120145376967480397366990493578897615204296873954844", + ) + .unwrap(), + Fp::from_str( + "8063283381910110762785892100479219642751540456251198202214433355775540036851", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "4393613870462297385565277757207010824900723217720226130342463666351557475823", + ) + .unwrap(), + Fp::from_str( + "9874972555132910032057499689351411450892722671352476280351715757363137891038", + ) + .unwrap(), + Fp::from_str( + "23590926474329902351439438151596866311245682682435235170001347511997242904868", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "17723373371137275859467518615551278584842947963894791032296774955869958211070", + ) + .unwrap(), + Fp::from_str( + "2350345015303336966039836492267992193191479606566494799781846958620636621159", + ) + .unwrap(), + Fp::from_str( + "27755207882790211140683010581856487965587066971982625511152297537534623405016", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "6584607987789185408123601849106260907671314994378225066806060862710814193906", + ) + .unwrap(), + Fp::from_str( + "609759108847171587253578490536519506369136135254150754300671591987320319770", + ) + .unwrap(), + Fp::from_str( + "28435187585965602110074342250910608316032945187476441868666714022529803033083", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "16016664911651770663938916450245705908287192964254704641717751103464322455303", + ) + .unwrap(), + Fp::from_str( + "17551273293154696089066968171579395800922204266630874071186322718903959339163", + ) + .unwrap(), + Fp::from_str( + "20414195497994754529479032467015716938594722029047207834858832838081413050198", + ) 
+ .unwrap(), + ], + vec![ + Fp::from_str( + "19773307918850685463180290966774465805537520595602496529624568184993487593855", + ) + .unwrap(), + Fp::from_str( + "24598603838812162820757838364185126333280131847747737533989799467867231166980", + ) + .unwrap(), + Fp::from_str( + "11040972566103463398651864390163813377135738019556270484707889323659789290225", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "5189242080957784038860188184443287562488963023922086723850863987437818393811", + ) + .unwrap(), + Fp::from_str( + "1435203288979376557721239239445613396009633263160237764653161500252258220144", + ) + .unwrap(), + Fp::from_str( + "13066591163578079667911016543985168493088721636164837520689376346534152547210", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "17345901407013599418148210465150865782628422047458024807490502489711252831342", + ) + .unwrap(), + Fp::from_str( + "22139633362249671900128029132387275539363684188353969065288495002671733200348", + ) + .unwrap(), + Fp::from_str( + "1061056418502836172283188490483332922126033656372467737207927075184389487061", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "10241738906190857416046229928455551829189196941239601756375665129874835232299", + ) + .unwrap(), + Fp::from_str( + "27808033332417845112292408673209999320983657696373938259351951416571545364415", + ) + .unwrap(), + Fp::from_str( + "18820154989873674261497645724903918046694142479240549687085662625471577737140", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "7983688435214640842673294735439196010654951226956101271763849527529940619307", + ) + .unwrap(), + Fp::from_str( + "17067928657801807648925755556866676899145460770352731818062909643149568271566", + ) + .unwrap(), + Fp::from_str( + "24472070825156236829515738091791182856425635433388202153358580534810244942762", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "25752201169361795911258625731016717414310986450004737514595241038036936283227", + ) + .unwrap(), + Fp::from_str( + 
"26041505376284666160132119888949817249574689146924196064963008712979256107535", + ) + .unwrap(), + Fp::from_str( + "23977050489096115210391718599021827780049209314283111721864956071820102846008", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "26678257097278788410676026718736087312816016749016738933942134600725962413805", + ) + .unwrap(), + Fp::from_str( + "10480026985951498884090911619636977502506079971893083605102044931823547311729", + ) + .unwrap(), + Fp::from_str( + "21126631300593007055117122830961273871167754554670317425822083333557535463396", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "1564862894215434177641156287699106659379648851457681469848362532131406827573", + ) + .unwrap(), + Fp::from_str( + "13247162472821152334486419054854847522301612781818744556576865965657773174584", + ) + .unwrap(), + Fp::from_str( + "8673615954922496961704442777870253767001276027366984739283715623634850885984", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "2794525076937490807476666942602262298677291735723129868457629508555429470085", + ) + .unwrap(), + Fp::from_str( + "4656175953888995612264371467596648522808911819700660048695373348629527757049", + ) + .unwrap(), + Fp::from_str( + "23221574237857660318443567292601561932489621919104226163978909845174616477329", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "1878392460078272317716114458784636517603142716091316893054365153068227117145", + ) + .unwrap(), + Fp::from_str( + "2370412714505757731457251173604396662292063533194555369091306667486647634097", + ) + .unwrap(), + Fp::from_str( + "17409784861870189930766639925394191888667317762328427589153989811980152373276", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "25869136641898166514111941708608048269584233242773814014385564101168774293194", + ) + .unwrap(), + Fp::from_str( + "11361209360311194794795494027949518465383235799633128250259863567683341091323", + ) + .unwrap(), + Fp::from_str( + "14913258820718821235077379851098720071902170702113538811112331615559409988569", + ) 
+ .unwrap(), + ], + vec![ + Fp::from_str( + "12957012022018304419868287033513141736995211906682903915897515954290678373899", + ) + .unwrap(), + Fp::from_str( + "17128889547450684566010972445328859295804027707361763477802050112063630550300", + ) + .unwrap(), + Fp::from_str( + "23329219085372232771288306767242735245018143857623151155581182779769305489903", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "1607741027962933685476527275858938699728586794398382348454736018784568853937", + ) + .unwrap(), + Fp::from_str( + "2611953825405141009309433982109911976923326848135736099261873796908057448476", + ) + .unwrap(), + Fp::from_str( + "7372230383134982628913227482618052530364724821976589156840317933676130378411", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "20203606758501212620842735123770014952499754751430660463060696990317556818571", + ) + .unwrap(), + Fp::from_str( + "4678361398979174017885631008335559529633853759463947250620930343087749944307", + ) + .unwrap(), + Fp::from_str( + "27176462634198471376002287271754121925750749676999036165457559387195124025594", + ) + .unwrap(), + ], + vec![ + Fp::from_str( + "6361981813552614697928697527332318530502852015189048838072565811230204474643", + ) + .unwrap(), + Fp::from_str( + "13815234633287489023151647353581705241145927054858922281829444557905946323248", + ) + .unwrap(), + Fp::from_str( + "10888828634279127981352133512429657747610298502219125571406085952954136470354", + ) + .unwrap(), + ], + ], + } +} diff --git a/poseidon/src/lib.rs b/poseidon/src/lib.rs index 20d7ca4df8..943d54cb87 100644 --- a/poseidon/src/lib.rs +++ b/poseidon/src/lib.rs @@ -1,4 +1,5 @@ pub mod constants; +pub mod dummy_values; pub mod pasta; pub mod permutation; pub mod poseidon; diff --git a/poseidon/src/poseidon.rs b/poseidon/src/poseidon.rs index 0ee363edc9..ff06022910 100644 --- a/poseidon/src/poseidon.rs +++ b/poseidon/src/poseidon.rs @@ -1,8 +1,5 @@ //! 
This module implements Poseidon Hash Function primitive -#[cfg(feature = "debug_sponge")] -use std::sync::atomic::{AtomicU64, Ordering::SeqCst}; - use crate::constants::SpongeConstants; use crate::permutation::{full_round, poseidon_block_cipher}; use ark_ff::Field; @@ -52,8 +49,6 @@ pub struct ArithmeticSponge { pub state: Vec, params: &'static ArithmeticSpongeParams, pub constants: std::marker::PhantomData, - #[cfg(feature = "debug_sponge")] - pub id: u64, } impl ArithmeticSponge { @@ -77,17 +72,12 @@ impl Sponge for ArithmeticSponge { state.push(F::zero()); } - #[cfg(feature = "debug_sponge")] - static COUNTER: AtomicU64 = AtomicU64::new(0); - ArithmeticSponge { state, rate, sponge_state: SpongeState::Absorbed(0), params, constants: std::marker::PhantomData, - #[cfg(feature = "debug_sponge")] - id: COUNTER.fetch_add(1, SeqCst), } } diff --git a/poseidon/src/sponge.rs b/poseidon/src/sponge.rs index 385afbf539..ff7f00a412 100644 --- a/poseidon/src/sponge.rs +++ b/poseidon/src/sponge.rs @@ -3,9 +3,6 @@ use crate::poseidon::{ArithmeticSponge, ArithmeticSpongeParams, Sponge}; use ark_ec::{short_weierstrass_jacobian::GroupAffine, SWModelParameters}; use ark_ff::{BigInteger, Field, FpParameters, One, PrimeField, Zero}; -#[cfg(feature = "debug_sponge")] -use o1_utils::FieldHelpers; - pub use crate::FqSponge; pub const CHALLENGE_LENGTH_IN_LIMBS: usize = 2; @@ -132,49 +129,6 @@ where } } -// Debugging macros -- these only insert code when non-release build and -// "debug_sponge" feature is enabled. -macro_rules! debug_sponge { - ($name:expr, $sponge:expr) => { - #[cfg(feature = "debug_sponge")] - { - // No input - debug_sponge_print_state!($name, $sponge); - } - }; - ($name:expr, $input:expr, $sponge:expr) => { - #[cfg(feature = "debug_sponge")] - { - // Field input - debug_sponge_print_state!($name, $sponge); - - println!( - "debug_sponge: id{} {} input {}", - $sponge.id, - $name, - $input.to_hex() - ); - } - }; -} -#[cfg(feature = "debug_sponge")] -macro_rules! 
debug_sponge_print_state { - ($name:expr, $sponge:expr) => { - println!( - "debug_sponge: id{} {} state {:?} {}", - $sponge.id, - $name, - $sponge.sponge_state, - $sponge - .state - .iter() - .map(|f| { f.to_hex() }) - .collect::>() - .join(" "), - ); - }; -} - impl FqSponge, P::ScalarField> for DefaultFqSponge where @@ -183,7 +137,6 @@ where { fn new(params: &'static ArithmeticSpongeParams) -> DefaultFqSponge { let sponge = ArithmeticSponge::new(params); - debug_sponge!("new", sponge); DefaultFqSponge { sponge, last_squeezed: vec![], @@ -196,14 +149,10 @@ where if g.infinity { // absorb a fake point (0, 0) let zero = P::BaseField::zero(); - debug_sponge!("absorb", zero, self.sponge); self.sponge.absorb(&[zero]); - debug_sponge!("absorb", zero, self.sponge); self.sponge.absorb(&[zero]); } else { - debug_sponge!("absorb", g.x, self.sponge); self.sponge.absorb(&[g.x]); - debug_sponge!("absorb", g.y, self.sponge); self.sponge.absorb(&[g.y]); } } @@ -213,7 +162,6 @@ where self.last_squeezed = vec![]; for fe in x { - debug_sponge!("absorb", fe, self.sponge); self.sponge.absorb(&[*fe]) } } @@ -232,7 +180,6 @@ where ::BigInt::from_bits_le(&bits), ) .expect("padding code has a bug"); - debug_sponge!("absorb", fe, self.sponge); self.sponge.absorb(&[fe]); } else { let low_bit = if bits[0] { @@ -246,16 +193,13 @@ where ) .expect("padding code has a bug"); - debug_sponge!("absorb", high_bits, self.sponge); self.sponge.absorb(&[high_bits]); - debug_sponge!("absorb", low_bit, self.sponge); self.sponge.absorb(&[low_bit]); } }); } fn digest(mut self) -> P::ScalarField { - debug_sponge!("squeeze", self.sponge); let x: ::BigInt = self.squeeze_field().into_repr(); // Returns zero for values that are too large. // This means that there is a bias for the value zero (in one of the curve). 
@@ -267,17 +211,14 @@ where } fn digest_fq(mut self) -> P::BaseField { - debug_sponge!("squeeze", self.sponge); self.squeeze_field() } fn challenge(&mut self) -> P::ScalarField { - debug_sponge!("squeeze", self.sponge); self.squeeze(CHALLENGE_LENGTH_IN_LIMBS) } fn challenge_fq(&mut self) -> P::BaseField { - debug_sponge!("squeeze", self.sponge); self.squeeze_field() } } diff --git a/rust-toolchain b/rust-toolchain index 65ee095984..cc31fcd4f5 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -1.67.0 +1.72 diff --git a/tools/kimchi-asm/CHANGELOG.md b/tools/kimchi-asm/CHANGELOG.md deleted file mode 100644 index 56773f1e6a..0000000000 --- a/tools/kimchi-asm/CHANGELOG.md +++ /dev/null @@ -1,13 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - - -## 0.1.0 (2023-03-09) - -- Initial release diff --git a/tools/kimchi-asm/Cargo.toml b/tools/kimchi-asm/Cargo.toml deleted file mode 100644 index 4bd69deaef..0000000000 --- a/tools/kimchi-asm/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "kimchi-asm" -version = "0.1.0" -description = "A tool to read circuits" -repository = "https://github.com/o1-labs/proof-systems" -homepage = "https://o1-labs.github.io/proof-systems/" -documentation = "https://o1-labs.github.io/proof-systems/rustdoc/" -readme = "README.md" -edition = "2021" -license = "Apache-2.0" - -[dependencies] -ark-ec = "0.3.0" -ark-ff = "0.3.0" -serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.79" -serde_with = "1.10.0" -tinytemplate = "1.1" - -mina-curves = { path = "../../curves", version = "0.1.0" } -kimchi = { path = "../../kimchi", version = "0.1.0" } -o1-utils = { path = "../../utils", version = "0.1.0" } -mina-poseidon = { path = "../../poseidon", version = "0.1.0" } 
-poly-commitment = { path = "../../poly-commitment", version = "0.1.0" } diff --git a/tools/kimchi-asm/README.md b/tools/kimchi-asm/README.md deleted file mode 100644 index b55767c23f..0000000000 --- a/tools/kimchi-asm/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# Kimchi ASM - -Allows you to deserialize a circuit in JSON format and print it using some pseudo-assembly language. - -Simple pipe your JSON to this binary. For example: - -```console -$ cargo run --bin kimchi-asm < examples/circuits/poseidon.json -``` - -You will get an output like: - -```asm -row0.pub.Generic<1,0,0,0,0> -.l1 -> row4.l1 - -row1.pub.Generic<1,0,0,0,0> -.l1 -> row2.l1 - -row2.Generic<-1,0,0,1,0><-1,0,0,1,0> -.l1 -> row4.r1, .r1 -> row1.l1 -.l2 -> row0.l1, .r2 -> row2.l2 - -row3.Generic<-1,0,0,1,0><-1,0,0,1,0> -.l1 -> row4.r2, .r1 -> row3.l1 -.l2 -> row4.l2, .r2 -> row3.l2 - -row4.Generic<0,0,1,-1,0><0,0,1,-1,0> -.l1 -> row2.r2, .r1 -> row2.r1, .o1 -> row5.l1 -.l2 -> row3.r2, .r2 -> row3.r1, .o2 -> row4.o1 - -row5.Generic<1,0,0,0,-1> -.l1 -> row4.o2 -``` diff --git a/tools/kimchi-asm/src/main.rs b/tools/kimchi-asm/src/main.rs deleted file mode 100644 index 257146dc83..0000000000 --- a/tools/kimchi-asm/src/main.rs +++ /dev/null @@ -1,36 +0,0 @@ -use ark_ff::PrimeField; -use kimchi::circuits::gate::{Circuit, CircuitGate}; -use mina_curves::pasta::Fp; -use serde::de::DeserializeOwned; - -#[derive(serde::Deserialize)] -pub struct DeserializableCircuit -where - F: PrimeField, -{ - pub public_input_size: usize, - #[serde(bound = "CircuitGate: DeserializeOwned")] - pub gates: Vec>, -} - -impl<'a, F> From<&'a DeserializableCircuit> for Circuit<'a, F> -where - F: PrimeField, -{ - fn from(circuit: &'a DeserializableCircuit) -> Self { - Circuit::new(circuit.public_input_size, &circuit.gates) - } -} - -fn main() { - // get what was piped to this binary - let stdin = std::io::stdin(); - - // deserialize it to JSON - let circuit: DeserializableCircuit = - serde_json::from_reader(stdin).expect("couldn't 
deserialize the circuit"); - - let circuit: Circuit<_> = (&circuit).into(); - - println!("{}", circuit.generate_asm()); -} diff --git a/tools/kimchi-visu/src/lib.rs b/tools/kimchi-visu/src/lib.rs index 54e833728b..541a51a106 100644 --- a/tools/kimchi-visu/src/lib.rs +++ b/tools/kimchi-visu/src/lib.rs @@ -13,7 +13,7 @@ use kimchi::{ curve::KimchiCurve, prover_index::ProverIndex, }; -use poly_commitment::commitment::CommitmentCurve; +use poly_commitment::{commitment::CommitmentCurve, evaluation_proof::OpeningProof}; use serde::Serialize; use std::{ collections::HashMap, @@ -75,10 +75,12 @@ where /// # Panics /// /// Will panic if `TinyTemplate::render()` returns `Error` or `std::fs::File::create()` returns `Error`. -pub fn visu( - index: &ProverIndex, +pub fn visu( + index: &ProverIndex, COLUMNS>, witness: Option>, -) { +) where + G::BaseField: PrimeField, +{ // serialize index let index = serde_json::to_string(index).expect("couldn't serialize index"); let mut data = format!("const index = {index};"); diff --git a/tools/kimchi-visu/src/main.rs b/tools/kimchi-visu/src/main.rs index afcf8a988a..ce81d5add7 100644 --- a/tools/kimchi-visu/src/main.rs +++ b/tools/kimchi-visu/src/main.rs @@ -2,7 +2,7 @@ use kimchi::{ circuits::{ gate::CircuitGate, polynomials::{generic::GenericGateSpec, poseidon::generate_witness}, - wires::{Wire, COLUMNS}, + wires::{Wire, KIMCHI_COLS}, }, curve::KimchiCurve, prover_index::testing::new_index_for_test, @@ -59,7 +59,7 @@ fn main() { }; // create the index - let index = new_index_for_test::(gates, public); + let index = new_index_for_test::(gates, public); // create the witness let mut witness = Witness::new(row + 1).inner(); diff --git a/tools/kimchi-visu/src/witness.rs b/tools/kimchi-visu/src/witness.rs index 404af0375c..5ef0c02b6d 100644 --- a/tools/kimchi-visu/src/witness.rs +++ b/tools/kimchi-visu/src/witness.rs @@ -1,11 +1,11 @@ use ark_ff::Field; -use kimchi::circuits::polynomial::COLUMNS; +use 
kimchi::circuits::polynomial::KIMCHI_COLS; use serde::Serialize; use serde_with::serde_as; use std::array; /// The type that represents the execution trace. -/// It represents a table of [COLUMNS] columns, with `n` rows. +/// It represents a table of [KIMCHI_COLS] columns, with `n` rows. /// `n` being the maximum size of the circuit, and the size of the domain. #[serde_as] #[derive(Debug, Serialize)] @@ -13,8 +13,8 @@ pub struct Witness where F: Field, { - #[serde_as(as = "[Vec; COLUMNS]")] - inner: [Vec; COLUMNS], + #[serde_as(as = "[Vec; KIMCHI_COLS]")] + inner: [Vec; KIMCHI_COLS], } impl Witness @@ -31,16 +31,16 @@ where /// Returns the inner witness. // TODO: deprecate this - pub fn inner(self) -> [Vec; COLUMNS] { + pub fn inner(self) -> [Vec; KIMCHI_COLS] { self.inner } } -impl From<[Vec; COLUMNS]> for Witness +impl From<[Vec; KIMCHI_COLS]> for Witness where F: Field, { - fn from(inner: [Vec; COLUMNS]) -> Self { + fn from(inner: [Vec; KIMCHI_COLS]) -> Self { Witness { inner } } } diff --git a/utils/src/chunked_polynomial.rs b/utils/src/chunked_polynomial.rs index c8d09c42b3..6f79de09ea 100644 --- a/utils/src/chunked_polynomial.rs +++ b/utils/src/chunked_polynomial.rs @@ -64,6 +64,7 @@ mod tests { let one = Fp::one(); let zeta = one + one; let zeta_n = zeta.square(); + let num_chunks = 4; let res = (one + zeta) * (one + zeta_n + zeta_n * zeta.square() + zeta_n * zeta.square() * zeta.square()); @@ -71,7 +72,10 @@ mod tests { let coeffs = [one, one, one, one, one, one, one, one]; let f = DensePolynomial::from_coefficients_slice(&coeffs); - let eval = f.to_chunked_polynomial(2).linearize(zeta_n).evaluate(&zeta); + let eval = f + .to_chunked_polynomial(num_chunks, 2) + .linearize(zeta_n) + .evaluate(&zeta); assert!(eval == res); } diff --git a/utils/src/dense_polynomial.rs b/utils/src/dense_polynomial.rs index fdb2b9e4a3..72560f1057 100644 --- a/utils/src/dense_polynomial.rs +++ b/utils/src/dense_polynomial.rs @@ -22,8 +22,8 @@ pub trait ExtendedDensePolynomial { 
fn eval_polynomial(coeffs: &[F], x: F) -> F; /// Convert a polynomial into chunks. - /// Implementors must ensure that the result contains at least 1 chunk. - fn to_chunked_polynomial(&self, size: usize) -> ChunkedPolynomial; + /// Implementors must ensure that the result contains exactly num_chunks. + fn to_chunked_polynomial(&self, num_chunks: usize, size: usize) -> ChunkedPolynomial; } impl ExtendedDensePolynomial for DensePolynomial { @@ -46,20 +46,17 @@ impl ExtendedDensePolynomial for DensePolynomial { DensePolynomial::from_coefficients_slice(coeffs).evaluate(&x) } - fn to_chunked_polynomial(&self, chunk_size: usize) -> ChunkedPolynomial { - // Ensure that there is always at least 1 polynomial in the resulting chunked polynomial. - if self.coeffs.is_empty() { - return ChunkedPolynomial { - polys: vec![DensePolynomial::from_coefficients_vec(vec![])], - size: chunk_size, - }; - } - - let mut chunk_polys: Vec> = vec![]; + fn to_chunked_polynomial(&self, num_chunks: usize, chunk_size: usize) -> ChunkedPolynomial { + let mut chunk_polys: Vec> = Vec::with_capacity(num_chunks); for chunk in self.coeffs.chunks(chunk_size) { chunk_polys.push(DensePolynomial::from_coefficients_slice(chunk)); } + // Pad unused chunks with zeros. + for _ in chunk_polys.len()..num_chunks { + chunk_polys.push(DensePolynomial::from_coefficients_vec(vec![])); + } + ChunkedPolynomial { polys: chunk_polys, size: chunk_size, @@ -83,12 +80,14 @@ mod tests { let one = Fp::one(); let two = one + one; let three = two + one; + let num_chunks = 4; // 1 + x + x^2 + x^3 + x^4 + x^5 + x^6 + x^7 let coeffs = [one, one, one, one, one, one, one, one]; let f = DensePolynomial::from_coefficients_slice(&coeffs); - let evals = f.to_chunked_polynomial(2).evaluate_chunks(two); - for eval in evals.into_iter().take(4) { + let evals = f.to_chunked_polynomial(num_chunks, 2).evaluate_chunks(two); + assert_eq!(evals.len(), num_chunks); + for eval in evals.into_iter().take(num_chunks) { assert!(eval == three); } }