diff --git a/.github/workflows/manage-runs.yml b/.github/workflows/manage-runs.yml deleted file mode 100644 index 29bc43aa53..0000000000 --- a/.github/workflows/manage-runs.yml +++ /dev/null @@ -1,47 +0,0 @@ -name: Manage runs -on: - pull_request: - types: - - closed - -jobs: - cancel-merged-or-closed-pr-runs: - name: Cancel runs for merged or closed PRs - runs-on: ubuntu-24.04 - steps: - - uses: octokit/request-action@v2.x - id: get_active_workflows - with: - route: GET /repos/{owner}/{repo}/actions/runs?status=in_progress&event=pull_request - owner: dashpay - repo: platform - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Extract running workflow ids - id: extract_workflow_ids - run: | - current_branch=${GITHUB_HEAD_REF} - - # loop thru the workflows found & filter out ones that are not on PRs pointing to this branch - workflow_ids=$(echo '${{ steps.get_active_workflows.outputs.data }}' | \ - jq '.workflow_runs | map({id, head_branch})' | \ - jq 'map(select(.head_branch == "'$current_branch'")) | map(.id)' | \ - jq 'join(",")') - - # strip the wrapping quote marks before passing to next step - echo 'WORKFLOW_IDS='$(echo $workflow_ids | tr -d '"') >> $GITHUB_ENV - - - name: Cancel active workflow runs - run: | - for id in ${WORKFLOW_IDS//,/ } - do - echo "Cancelling workflow with id: $id" - - # use curl here as I have no idea how to use a github action in a loop - curl \ - -X POST \ - -H "Accept: application/vnd.github+json" \ - -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ - https://api.github.com/repos/${{ github.repository }}/actions/runs/$id/cancel - done diff --git a/.github/workflows/prebuild-devcontainers.yml b/.github/workflows/prebuild-devcontainers.yml index c7d4ee28f0..38a1ecd403 100644 --- a/.github/workflows/prebuild-devcontainers.yml +++ b/.github/workflows/prebuild-devcontainers.yml @@ -19,7 +19,7 @@ jobs: build: name: Build and push devcontainer runs-on: ubuntu-24.04 - timeout-minutes: 60 + timeout-minutes: 240 steps: - 
name: Checkout repo uses: actions/checkout@v4 diff --git a/.github/workflows/tests-build-image.yml b/.github/workflows/tests-build-image.yml index dc6efffa47..e14b7c1cf8 100644 --- a/.github/workflows/tests-build-image.yml +++ b/.github/workflows/tests-build-image.yml @@ -18,6 +18,7 @@ jobs: build-image: name: Build ${{ inputs.name }} image runs-on: ubuntu-24.04 + timeout-minutes: 30 steps: - name: Check out repo uses: actions/checkout@v4 diff --git a/.github/workflows/tests-build-js.yml b/.github/workflows/tests-build-js.yml index 1c73612e22..a36bd91e2c 100644 --- a/.github/workflows/tests-build-js.yml +++ b/.github/workflows/tests-build-js.yml @@ -5,6 +5,7 @@ jobs: build-js: name: Build JS runs-on: ubuntu-24.04 + timeout-minutes: 10 steps: - uses: softwareforgood/check-artifact-v4-existence@v0 id: check-artifact diff --git a/.github/workflows/tests-codeql.yml b/.github/workflows/tests-codeql.yml index ed972e6d9c..9ed7351bf7 100644 --- a/.github/workflows/tests-codeql.yml +++ b/.github/workflows/tests-codeql.yml @@ -5,6 +5,7 @@ jobs: codeql: name: Run Code QL runs-on: ubuntu-24.04 + timeout-minutes: 15 permissions: actions: read contents: read diff --git a/.github/workflows/tests-js-package.yml b/.github/workflows/tests-js-package.yml index 681c27b560..a2bf39a47b 100644 --- a/.github/workflows/tests-js-package.yml +++ b/.github/workflows/tests-js-package.yml @@ -18,6 +18,7 @@ jobs: lint: name: Linting runs-on: ubuntu-24.04 + timeout-minutes: 5 permissions: id-token: write contents: read @@ -40,6 +41,7 @@ jobs: test: name: Tests runs-on: ubuntu-24.04 + timeout-minutes: 15 permissions: id-token: write contents: read diff --git a/.github/workflows/tests-rs-package.yml b/.github/workflows/tests-rs-package.yml index 3696e7e9db..7730485c62 100644 --- a/.github/workflows/tests-rs-package.yml +++ b/.github/workflows/tests-rs-package.yml @@ -110,6 +110,7 @@ jobs: detect_structure_changes: name: Detect immutable structure changes + timeout-minutes: 10 runs-on: ubuntu-24.04 
# FIXME: as we use `gh pr view` below, this check can only # run on pull requests. We should find a way to run it diff --git a/.pnp.cjs b/.pnp.cjs index 21f4c62f9a..c642620374 100755 --- a/.pnp.cjs +++ b/.pnp.cjs @@ -14293,7 +14293,7 @@ const RAW_RUNTIME_STATE = ["log-symbols", "npm:4.1.0"],\ ["minimatch", "npm:5.0.1"],\ ["ms", "npm:2.1.3"],\ - ["nanoid", "npm:3.3.3"],\ + ["nanoid", "npm:3.3.8"],\ ["serialize-javascript", "npm:6.0.0"],\ ["strip-json-comments", "npm:3.1.1"],\ ["supports-color", "npm:8.1.1"],\ @@ -14323,7 +14323,7 @@ const RAW_RUNTIME_STATE = ["log-symbols", "npm:4.1.0"],\ ["minimatch", "npm:4.2.1"],\ ["ms", "npm:2.1.3"],\ - ["nanoid", "npm:3.3.1"],\ + ["nanoid", "npm:3.3.8"],\ ["serialize-javascript", "npm:6.0.0"],\ ["strip-json-comments", "npm:3.1.1"],\ ["supports-color", "npm:8.1.1"],\ @@ -14484,17 +14484,10 @@ const RAW_RUNTIME_STATE = }]\ ]],\ ["nanoid", [\ - ["npm:3.3.1", {\ - "packageLocation": "./.yarn/cache/nanoid-npm-3.3.1-bdd760bee0-306f2cb9e4.zip/node_modules/nanoid/",\ + ["npm:3.3.8", {\ + "packageLocation": "./.yarn/cache/nanoid-npm-3.3.8-d22226208b-2d1766606c.zip/node_modules/nanoid/",\ "packageDependencies": [\ - ["nanoid", "npm:3.3.1"]\ - ],\ - "linkType": "HARD"\ - }],\ - ["npm:3.3.3", {\ - "packageLocation": "./.yarn/cache/nanoid-npm-3.3.3-25d865be84-c703ed58a2.zip/node_modules/nanoid/",\ - "packageDependencies": [\ - ["nanoid", "npm:3.3.3"]\ + ["nanoid", "npm:3.3.8"]\ ],\ "linkType": "HARD"\ }]\ diff --git a/.yarn/cache/fsevents-patch-19706e7e35-10.zip b/.yarn/cache/fsevents-patch-19706e7e35-10.zip deleted file mode 100644 index aff1ab12ce..0000000000 Binary files a/.yarn/cache/fsevents-patch-19706e7e35-10.zip and /dev/null differ diff --git a/.yarn/cache/nanoid-npm-3.3.1-bdd760bee0-306f2cb9e4.zip b/.yarn/cache/nanoid-npm-3.3.1-bdd760bee0-306f2cb9e4.zip deleted file mode 100644 index aaa2856372..0000000000 Binary files a/.yarn/cache/nanoid-npm-3.3.1-bdd760bee0-306f2cb9e4.zip and /dev/null differ diff --git 
a/.yarn/cache/nanoid-npm-3.3.3-25d865be84-c703ed58a2.zip b/.yarn/cache/nanoid-npm-3.3.3-25d865be84-c703ed58a2.zip deleted file mode 100644 index b4130ad872..0000000000 Binary files a/.yarn/cache/nanoid-npm-3.3.3-25d865be84-c703ed58a2.zip and /dev/null differ diff --git a/.yarn/cache/nanoid-npm-3.3.8-d22226208b-2d1766606c.zip b/.yarn/cache/nanoid-npm-3.3.8-d22226208b-2d1766606c.zip new file mode 100644 index 0000000000..ec9e2621c6 Binary files /dev/null and b/.yarn/cache/nanoid-npm-3.3.8-d22226208b-2d1766606c.zip differ diff --git a/Cargo.lock b/Cargo.lock index 6f58c0dcc6..d249acda8f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2167,15 +2167,6 @@ dependencies = [ "ahash 0.7.8", ] -[[package]] -name = "hashbrown" -version = "0.14.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" -dependencies = [ - "ahash 0.8.11", -] - [[package]] name = "hashbrown" version = "0.15.2" @@ -2925,9 +2916,9 @@ checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "metrics" -version = "0.23.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884adb57038347dfbaf2d5065887b6cf4312330dc8e94bc30a1a839bd79d3261" +checksum = "7a7deb012b3b2767169ff203fadb4c6b0b82b947512e5eb9e0b78c2e186ad9e3" dependencies = [ "ahash 0.8.11", "portable-atomic", @@ -2935,9 +2926,9 @@ dependencies = [ [[package]] name = "metrics-exporter-prometheus" -version = "0.15.3" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f0c8427b39666bf970460908b213ec09b3b350f20c0c2eabcbba51704a08e6" +checksum = "85b6f8152da6d7892ff1b7a1c0fa3f435e92b5918ad67035c3bb432111d9a29b" dependencies = [ "base64 0.22.1", "http-body-util", @@ -2955,15 +2946,14 @@ dependencies = [ [[package]] name = "metrics-util" -version = "0.17.0" +version = "0.18.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4259040465c955f9f2f1a4a8a16dc46726169bca0f88e8fb2dbeced487c3e828" +checksum = "15b482df36c13dd1869d73d14d28cd4855fbd6cfc32294bee109908a9f4a4ed7" dependencies = [ "crossbeam-epoch", "crossbeam-utils", - "hashbrown 0.14.5", + "hashbrown 0.15.2", "metrics", - "num_cpus", "quanta", "sketches-ddsketch", ] @@ -4463,9 +4453,9 @@ dependencies = [ [[package]] name = "sketches-ddsketch" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85636c14b73d81f541e525f585c0a2109e6744e1565b5c1668e31c70c10ed65c" +checksum = "c1e9a774a6c28142ac54bb25d25562e6bcf957493a184f15ad4eebccb23e410a" [[package]] name = "slab" diff --git a/package.json b/package.json index 16a30ea3df..46261d4087 100644 --- a/package.json +++ b/package.json @@ -94,7 +94,8 @@ "body-parser": "^1.20.3", "path-to-regexp": "^1.9.0", "cookie": "^0.7.0", - "cross-spawn": "^7.0.5" + "cross-spawn": "^7.0.5", + "nanoid": "^3.3.8" }, "dependencies": { "node-gyp": "^10.0.1" diff --git a/packages/dashmate/configs/defaults/getBaseConfigFactory.js b/packages/dashmate/configs/defaults/getBaseConfigFactory.js index e26e67ca50..771041c457 100644 --- a/packages/dashmate/configs/defaults/getBaseConfigFactory.js +++ b/packages/dashmate/configs/defaults/getBaseConfigFactory.js @@ -310,7 +310,7 @@ export default function getBaseConfigFactory() { tenderdash: { mode: 'full', docker: { - image: 'dashpay/tenderdash:1.3', + image: 'dashpay/tenderdash:1', }, p2p: { host: '0.0.0.0', diff --git a/packages/dashmate/configs/getConfigFileMigrationsFactory.js b/packages/dashmate/configs/getConfigFileMigrationsFactory.js index 5004bd870b..d72facab0f 100644 --- a/packages/dashmate/configs/getConfigFileMigrationsFactory.js +++ b/packages/dashmate/configs/getConfigFileMigrationsFactory.js @@ -1048,6 +1048,13 @@ export default function getConfigFileMigrationsFactory(homeDir, defaultConfigs) }); return configFile; }, + 
'1.7.0': (configFile) => { + Object.entries(configFile.configs) + .forEach(([, options]) => { + options.platform.drive.tenderdash.docker.image = 'dashpay/tenderdash:1'; + }); + return configFile; + }, }; } diff --git a/packages/dashmate/templates/core/dash.conf.dot b/packages/dashmate/templates/core/dash.conf.dot index cbc0b93373..fb8ba0a785 100644 --- a/packages/dashmate/templates/core/dash.conf.dot +++ b/packages/dashmate/templates/core/dash.conf.dot @@ -108,7 +108,7 @@ devnet={{=it.core.devnet.name}} powtargetspacing={{=it.core.devnet.powTargetSpacing}} minimumdifficultyblocks={{=it.core.devnet.minimumDifficultyBlocks}} highsubsidyblocks=500 -highsubsidyfactor=10 +highsubsidyfactor=100 llmqchainlocks={{=it.core.devnet.llmq.chainLocks}} llmqinstantsenddip0024={{=it.core.devnet.llmq.instantSend}} llmqplatform={{=it.core.devnet.llmq.platform}} diff --git a/packages/rs-dapi-client/Cargo.toml b/packages/rs-dapi-client/Cargo.toml index e74ffcbf56..46fa84ab53 100644 --- a/packages/rs-dapi-client/Cargo.toml +++ b/packages/rs-dapi-client/Cargo.toml @@ -37,5 +37,6 @@ lru = { version = "0.12.3" } serde = { version = "1.0.197", optional = true, features = ["derive"] } serde_json = { version = "1.0.120", optional = true } chrono = { version = "0.4.38", features = ["serde"] } + [dev-dependencies] tokio = { version = "1.40", features = ["macros"] } diff --git a/packages/rs-dapi-client/src/address_list.rs b/packages/rs-dapi-client/src/address_list.rs index 0c21ecc0b1..2f59b22c3b 100644 --- a/packages/rs-dapi-client/src/address_list.rs +++ b/packages/rs-dapi-client/src/address_list.rs @@ -1,12 +1,14 @@ //! Subsystem to manage DAPI nodes. 
use chrono::Utc; -use dapi_grpc::tonic::codegen::http; use dapi_grpc::tonic::transport::Uri; use rand::{rngs::SmallRng, seq::IteratorRandom, SeedableRng}; -use std::collections::HashSet; +use std::collections::hash_map::Entry; +use std::collections::HashMap; use std::hash::{Hash, Hasher}; +use std::mem; use std::str::FromStr; +use std::sync::{Arc, RwLock}; use std::time::Duration; const DEFAULT_BASE_BAN_PERIOD: Duration = Duration::from_secs(60); @@ -14,54 +16,68 @@ const DEFAULT_BASE_BAN_PERIOD: Duration = Duration::from_secs(60); /// DAPI address. #[derive(Debug, Clone, Eq)] #[cfg_attr(feature = "mocks", derive(serde::Serialize, serde::Deserialize))] -pub struct Address { - ban_count: usize, - banned_until: Option>, - #[cfg_attr(feature = "mocks", serde(with = "http_serde::uri"))] - uri: Uri, -} +pub struct Address(#[cfg_attr(feature = "mocks", serde(with = "http_serde::uri"))] Uri); impl FromStr for Address { type Err = AddressListError; fn from_str(s: &str) -> Result { Uri::from_str(s) - .map(Address::from) - .map_err(AddressListError::from) + .map_err(|e| AddressListError::InvalidAddressUri(e.to_string())) + .map(Address::try_from)? } } impl PartialEq for Address { fn eq(&self, other: &Self) -> bool { - self.uri == other.uri + self.0 == other.0 } } impl PartialEq for Address { fn eq(&self, other: &Uri) -> bool { - self.uri == *other + self.0 == *other } } impl Hash for Address { fn hash(&self, state: &mut H) { - self.uri.hash(state); + self.0.hash(state); } } -impl From for Address { - fn from(uri: Uri) -> Self { - Address { - ban_count: 0, - banned_until: None, - uri, +impl TryFrom for Address { + type Error = AddressListError; + + fn try_from(value: Uri) -> Result { + if value.host().is_none() { + return Err(AddressListError::InvalidAddressUri( + "uri must contain host".to_string(), + )); } + + Ok(Address(value)) } } impl Address { + /// Get [Uri] of a node. 
+ pub fn uri(&self) -> &Uri { + &self.0 + } +} + +/// Address status +/// Contains information about the number of bans and the time until the next ban is lifted. +#[derive(Debug, Default, Clone)] +pub struct AddressStatus { + ban_count: usize, + banned_until: Option>, +} + +impl AddressStatus { /// Ban the [Address] so it won't be available through [AddressList::get_live_address] for some time. - fn ban(&mut self, base_ban_period: &Duration) { + pub fn ban(&mut self, base_ban_period: &Duration) { let coefficient = (self.ban_count as f64).exp(); let ban_period = Duration::from_secs_f64(base_ban_period.as_secs_f64() * coefficient); @@ -75,35 +91,27 @@ impl Address { } /// Clears ban record. - fn unban(&mut self) { + pub fn unban(&mut self) { self.ban_count = 0; self.banned_until = None; } - - /// Get [Uri] of a node. - pub fn uri(&self) -> &Uri { - &self.uri - } } /// [AddressList] errors #[derive(Debug, thiserror::Error)] #[cfg_attr(feature = "mocks", derive(serde::Serialize, serde::Deserialize))] pub enum AddressListError { - /// Specified address is not present in the list - #[error("address {0} not found in the list")] - AddressNotFound(#[cfg_attr(feature = "mocks", serde(with = "http_serde::uri"))] Uri), /// A valid uri is required to create an Address #[error("unable parse address: {0}")] #[cfg_attr(feature = "mocks", serde(skip))] - InvalidAddressUri(#[from] http::uri::InvalidUri), + InvalidAddressUri(String), } /// A structure to manage DAPI addresses to select from /// for [DapiRequest](crate::DapiRequest) execution. #[derive(Debug, Clone)] pub struct AddressList { - addresses: HashSet
, + addresses: Arc>>, base_ban_period: Duration, } @@ -115,7 +123,7 @@ impl Default for AddressList { impl std::fmt::Display for Address { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - self.uri.fmt(f) + self.0.fmt(f) } } @@ -128,83 +136,103 @@ impl AddressList { /// Creates an empty [AddressList] with adjustable base ban time. pub fn with_settings(base_ban_period: Duration) -> Self { AddressList { - addresses: HashSet::new(), + addresses: Arc::new(RwLock::new(HashMap::new())), base_ban_period, } } /// Bans address - pub(crate) fn ban_address(&mut self, address: &Address) -> Result<(), AddressListError> { - if !self.addresses.remove(address) { - return Err(AddressListError::AddressNotFound(address.uri.clone())); - }; + /// Returns false if the address is not in the list. + pub fn ban(&self, address: &Address) -> bool { + let mut guard = self.addresses.write().unwrap(); - let mut banned_address = address.clone(); - banned_address.ban(&self.base_ban_period); + let Some(status) = guard.get_mut(address) else { + return false; + }; - self.addresses.insert(banned_address); + status.ban(&self.base_ban_period); - Ok(()) + true } /// Clears address' ban record - pub(crate) fn unban_address(&mut self, address: &Address) -> Result<(), AddressListError> { - if !self.addresses.remove(address) { - return Err(AddressListError::AddressNotFound(address.uri.clone())); + /// Returns false if the address is not in the list. + pub fn unban(&self, address: &Address) -> bool { + let mut guard = self.addresses.write().unwrap(); + + let Some(status) = guard.get_mut(address) else { + return false; }; - let mut unbanned_address = address.clone(); - unbanned_address.unban(); + status.unban(); + + true + } - self.addresses.insert(unbanned_address); + /// Check if the address is banned. 
+ pub fn is_banned(&self, address: &Address) -> bool { + let guard = self.addresses.read().unwrap(); - Ok(()) + guard + .get(address) + .map(|status| status.is_banned()) + .unwrap_or(false) } /// Adds a node [Address] to [AddressList] /// Returns false if the address is already in the list. pub fn add(&mut self, address: Address) -> bool { - self.addresses.insert(address) + let mut guard = self.addresses.write().unwrap(); + + match guard.entry(address) { + Entry::Occupied(_) => false, + Entry::Vacant(e) => { + e.insert(AddressStatus::default()); + + true + } + } } - // TODO: this is the most simple way to add an address - // however we need to support bulk loading (e.g. providing a network name) - // and also fetch updated from SML. + /// Remove address from the list + /// Returns [AddressStatus] if the address was in the list. + pub fn remove(&mut self, address: &Address) -> Option { + let mut guard = self.addresses.write().unwrap(); + + guard.remove(address) + } + + #[deprecated] + // TODO: Remove in favor of add /// Add a node [Address] to [AddressList] by [Uri]. /// Returns false if the address is already in the list. pub fn add_uri(&mut self, uri: Uri) -> bool { - self.addresses.insert(uri.into()) + self.add(Address::try_from(uri).expect("valid uri")) } /// Randomly select a not banned address. - pub fn get_live_address(&self) -> Option<&Address> { - let mut rng = SmallRng::from_entropy(); + pub fn get_live_address(&self) -> Option
{ + let guard = self.addresses.read().unwrap(); - self.unbanned().into_iter().choose(&mut rng) - } + let mut rng = SmallRng::from_entropy(); - /// Get all addresses that are not banned. - fn unbanned(&self) -> Vec<&Address> { let now = chrono::Utc::now(); - self.addresses + guard .iter() - .filter(|addr| { - addr.banned_until + .filter(|(_, status)| { + status + .banned_until .map(|banned_until| banned_until < now) .unwrap_or(true) }) - .collect() - } - - /// Get number of available, not banned addresses. - pub fn available(&self) -> usize { - self.unbanned().len() + .choose(&mut rng) + .map(|(addr, _)| addr.clone()) } /// Get number of all addresses, both banned and not banned. pub fn len(&self) -> usize { - self.addresses.len() + self.addresses.read().unwrap().len() } /// Check if the list is empty. @@ -212,38 +240,43 @@ impl AddressList { /// Returns false if there is at least one address in the list. /// Banned addresses are also counted. pub fn is_empty(&self) -> bool { - self.addresses.is_empty() + self.addresses.read().unwrap().is_empty() } } -// TODO: Must be changed to FromStr -impl From<&str> for AddressList { - fn from(value: &str) -> Self { - let uri_list: Vec = value +impl IntoIterator for AddressList { + type Item = (Address, AddressStatus); + type IntoIter = std::collections::hash_map::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + let mut guard = self.addresses.write().unwrap(); + + let addresses_map = mem::take(&mut *guard); + + addresses_map.into_iter() + } +} + +impl FromStr for AddressList { + type Err = AddressListError; + + fn from_str(s: &str) -> Result { + let uri_list: Vec
= s .split(',') - .map(|uri| Uri::from_str(uri).expect("invalid uri")) - .collect(); + .map(Address::from_str) + .collect::>()?; - Self::from_iter(uri_list) + Ok(Self::from_iter(uri_list)) } } -impl FromIterator for AddressList { - fn from_iter>(iter: T) -> Self { +impl FromIterator
for AddressList { + fn from_iter>(iter: T) -> Self { let mut address_list = Self::new(); for uri in iter { - address_list.add_uri(uri); + address_list.add(uri); } address_list } } - -impl IntoIterator for AddressList { - type Item = Address; - type IntoIter = std::collections::hash_set::IntoIter
; - - fn into_iter(self) -> Self::IntoIter { - self.addresses.into_iter() - } -} diff --git a/packages/rs-dapi-client/src/dapi_client.rs b/packages/rs-dapi-client/src/dapi_client.rs index c5ef5e67a1..ebca641f40 100644 --- a/packages/rs-dapi-client/src/dapi_client.rs +++ b/packages/rs-dapi-client/src/dapi_client.rs @@ -3,14 +3,15 @@ use backon::{ConstantBuilder, Retryable}; use dapi_grpc::mock::Mockable; use dapi_grpc::tonic::async_trait; -use std::fmt::Debug; +use std::fmt::{Debug, Display}; use std::sync::atomic::AtomicUsize; -use std::sync::{Arc, RwLock}; +use std::sync::Arc; use std::time::Duration; use tracing::Instrument; use crate::address_list::AddressListError; use crate::connection_pool::ConnectionPool; +use crate::request_settings::AppliedRequestSettings; use crate::transport::TransportError; use crate::{ transport::{TransportClient, TransportRequest}, @@ -72,7 +73,7 @@ impl Mockable for DapiClientError { /// Access point to DAPI. #[derive(Debug, Clone)] pub struct DapiClient { - address_list: Arc>, + address_list: AddressList, settings: RequestSettings, pool: ConnectionPool, #[cfg(feature = "dump")] @@ -86,7 +87,7 @@ impl DapiClient { let address_count = 3 * address_list.len(); Self { - address_list: Arc::new(RwLock::new(address_list)), + address_list, settings, pool: ConnectionPool::new(address_count), #[cfg(feature = "dump")] @@ -95,11 +96,74 @@ impl DapiClient { } /// Return the [DapiClient] address list. - pub fn address_list(&self) -> &Arc> { + pub fn address_list(&self) -> &AddressList { &self.address_list } } +/// Ban address in case of retryable error or unban it +/// if it was banned, and the request was successful. 
+pub fn update_address_ban_status( + address_list: &AddressList, + result: &ExecutionResult, + applied_settings: &AppliedRequestSettings, +) where + E: CanRetry + Display + Debug, +{ + match &result { + Ok(response) => { + // Unban the address if it was banned and node responded successfully this time + if address_list.is_banned(&response.address) { + if address_list.unban(&response.address) { + tracing::debug!(address = ?response.address, "unban successfully responded address {}", response.address); + } else { + // The address might be already removed from the list + // by background process (i.e., SML update), and it's fine. + tracing::debug!( + address = ?response.address, + "unable to unban address {} because it's not in the list anymore", + response.address + ); + } + } + } + Err(error) => { + if error.can_retry() { + if let Some(address) = error.address.as_ref() { + if applied_settings.ban_failed_address { + if address_list.ban(address) { + tracing::warn!( + ?address, + ?error, + "ban address {address} due to error: {error}" + ); + } else { + // The address might be already removed from the list + // by background process (i.e., SML update), and it's fine. + tracing::debug!( + ?address, + ?error, + "unable to ban address {address} because it's not in the list anymore" + ); + } + } else { + tracing::debug!( + ?error, + ?address, + "we should ban the address {address} due to the error but banning is disabled" + ); + } + } else { + tracing::debug!( + ?error, + "we should ban an address due to the error but address is absent" + ); + } + } + } + }; +} + #[async_trait] impl DapiRequestExecutor for DapiClient { /// Execute the [DapiRequest](crate::DapiRequest). 
@@ -140,18 +204,11 @@ impl DapiRequestExecutor for DapiClient { let retries_counter = Arc::clone(retries_counter_arc_ref); // Try to get an address to initialize transport on: - let address_list = self + let address_result = self .address_list - .read() - .expect("can't get address list for read"); - - let address_result = address_list .get_live_address() - .cloned() .ok_or(DapiClientError::NoAvailableAddresses); - drop(address_list); - let _span = tracing::trace_span!( "execute request", address = ?address_result, @@ -177,7 +234,7 @@ impl DapiRequestExecutor for DapiClient { // `impl Future`, not a `Result` itself. let address = address_result.map_err(|inner| ExecutionError { inner, - retries: retries_counter.load(std::sync::atomic::Ordering::Acquire), + retries: retries_counter.load(std::sync::atomic::Ordering::Relaxed), address: None, })?; @@ -190,85 +247,44 @@ impl DapiRequestExecutor for DapiClient { ) .map_err(|error| ExecutionError { inner: DapiClientError::Transport(error), - retries: retries_counter.load(std::sync::atomic::Ordering::Acquire), + retries: retries_counter.load(std::sync::atomic::Ordering::Relaxed), address: Some(address.clone()), })?; - let response = transport_request + let result = transport_request .execute_transport(&mut transport_client, &applied_settings) .await .map_err(DapiClientError::Transport); - match &response { - Ok(_) => { - // Unban the address if it was banned and node responded successfully this time - if address.is_banned() { - let mut address_list = self - .address_list - .write() - .expect("can't get address list for write"); - - address_list.unban_address(&address).map_err(|error| { - ExecutionError { - inner: DapiClientError::AddressList(error), - retries: retries_counter - .load(std::sync::atomic::Ordering::Acquire), - address: Some(address.clone()), - } - })?; + let retries = retries_counter.load(std::sync::atomic::Ordering::Relaxed); + + let execution_result = result + .map(|inner| { + tracing::trace!(response = 
?inner, "received {} response", response_name); + + ExecutionResponse { + inner, + retries, + address: address.clone(), } + }) + .map_err(|inner| { + tracing::debug!(error = ?inner, "received error: {inner}"); - tracing::trace!(?response, "received {} response", response_name); - } - Err(error) => { - if error.can_retry() { - if applied_settings.ban_failed_address { - let mut address_list = self - .address_list - .write() - .expect("can't get address list for write"); - tracing::warn!( - ?address, - ?error, - "received server error, banning address" - ); - address_list.ban_address(&address).map_err(|error| { - ExecutionError { - inner: DapiClientError::AddressList(error), - retries: retries_counter - .load(std::sync::atomic::Ordering::Acquire), - address: Some(address.clone()), - } - })?; - } else { - tracing::debug!( - ?address, - ?error, - "received server error, we should ban the node but banning is disabled" - ); - } - } else { - tracing::debug!( - ?error, - "received server error, most likely the request is invalid" - ); + ExecutionError { + inner, + retries, + address: Some(address.clone()), } - } - }; + }); - let retries = retries_counter.load(std::sync::atomic::Ordering::Acquire); + update_address_ban_status::( + &self.address_list, + &execution_result, + &applied_settings, + ); - response - .map(|inner| ExecutionResponse { - inner, - retries, - address: address.clone(), - }) - .map_err(|inner| ExecutionError { - inner, - retries, - address: Some(address), - }) + execution_result } }; @@ -278,7 +294,7 @@ impl DapiRequestExecutor for DapiClient { .retry(retry_settings) .notify(|error, duration| { let retries_counter = Arc::clone(&retries_counter_arc); - retries_counter.fetch_add(1, std::sync::atomic::Ordering::AcqRel); + retries_counter.fetch_add(1, std::sync::atomic::Ordering::Relaxed); tracing::warn!( ?error, diff --git a/packages/rs-dapi-client/src/lib.rs b/packages/rs-dapi-client/src/lib.rs index f8c03f3956..e820a714a0 100644 --- 
a/packages/rs-dapi-client/src/lib.rs +++ b/packages/rs-dapi-client/src/lib.rs @@ -16,8 +16,9 @@ pub mod transport; pub use address_list::Address; pub use address_list::AddressList; pub use address_list::AddressListError; +pub use address_list::AddressStatus; pub use connection_pool::ConnectionPool; -pub use dapi_client::{DapiClient, DapiClientError}; +pub use dapi_client::{update_address_ban_status, DapiClient, DapiClientError}; #[cfg(feature = "dump")] pub use dump::DumpData; pub use executor::{ diff --git a/packages/rs-dapi-client/src/transport/grpc.rs b/packages/rs-dapi-client/src/transport/grpc.rs index 853639ca77..62a7590406 100644 --- a/packages/rs-dapi-client/src/transport/grpc.rs +++ b/packages/rs-dapi-client/src/transport/grpc.rs @@ -44,8 +44,8 @@ impl TransportClient for PlatformGrpcClient { .get_or_create(PoolPrefix::Platform, &uri, None, || { match create_channel(uri.clone(), None) { Ok(channel) => Ok(Self::new(channel).into()), - Err(e) => Err(dapi_grpc::tonic::Status::failed_precondition(format!( - "Channel creation failed: {}", + Err(e) => Err(dapi_grpc::tonic::Status::invalid_argument(format!( + "channel creation failed: {}", e ))), } @@ -65,7 +65,7 @@ impl TransportClient for PlatformGrpcClient { Some(settings), || match create_channel(uri.clone(), Some(settings)) { Ok(channel) => Ok(Self::new(channel).into()), - Err(e) => Err(dapi_grpc::tonic::Status::failed_precondition(format!( + Err(e) => Err(dapi_grpc::tonic::Status::invalid_argument(format!( "Channel creation failed: {}", e ))), @@ -81,7 +81,7 @@ impl TransportClient for CoreGrpcClient { .get_or_create(PoolPrefix::Core, &uri, None, || { match create_channel(uri.clone(), None) { Ok(channel) => Ok(Self::new(channel).into()), - Err(e) => Err(dapi_grpc::tonic::Status::failed_precondition(format!( + Err(e) => Err(dapi_grpc::tonic::Status::invalid_argument(format!( "Channel creation failed: {}", e ))), @@ -102,7 +102,7 @@ impl TransportClient for CoreGrpcClient { Some(settings), || match 
create_channel(uri.clone(), Some(settings)) { Ok(channel) => Ok(Self::new(channel).into()), - Err(e) => Err(dapi_grpc::tonic::Status::failed_precondition(format!( + Err(e) => Err(dapi_grpc::tonic::Status::invalid_argument(format!( "Channel creation failed: {}", e ))), diff --git a/packages/rs-drive-abci/Cargo.toml b/packages/rs-drive-abci/Cargo.toml index 250cfcef80..c4ac1300ff 100644 --- a/packages/rs-drive-abci/Cargo.toml +++ b/packages/rs-drive-abci/Cargo.toml @@ -58,8 +58,8 @@ file-rotate = { version = "0.7.3" } reopen = { version = "1.0.3" } delegate = { version = "0.13" } regex = { version = "1.8.1" } -metrics = { version = "0.23" } -metrics-exporter-prometheus = { version = "0.15", default-features = false, features = [ +metrics = { version = "0.24" } +metrics-exporter-prometheus = { version = "0.16", default-features = false, features = [ "http-listener", ] } url = { version = "2.3.1" } diff --git a/packages/rs-drive-proof-verifier/src/error.rs b/packages/rs-drive-proof-verifier/src/error.rs index 8c0664c825..3fb5825a8c 100644 --- a/packages/rs-drive-proof-verifier/src/error.rs +++ b/packages/rs-drive-proof-verifier/src/error.rs @@ -1,5 +1,4 @@ use dpp::ProtocolError; -use drive::grovedb::operations::proof::GroveDBProof; /// Errors #[derive(Debug, thiserror::Error)] diff --git a/packages/rs-drive-verify-c-binding/Cargo.toml b/packages/rs-drive-verify-c-binding/Cargo.toml index 1f6d9b4f1e..22da440ca7 100644 --- a/packages/rs-drive-verify-c-binding/Cargo.toml +++ b/packages/rs-drive-verify-c-binding/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rs-drive-verify-c-binding" -version = "1.1.0" +version = "1.6.2" edition = "2021" rust-version.workspace = true diff --git a/packages/rs-platform-version/src/version/mod.rs b/packages/rs-platform-version/src/version/mod.rs index b84e81ada7..b143a1daf1 100644 --- a/packages/rs-platform-version/src/version/mod.rs +++ b/packages/rs-platform-version/src/version/mod.rs @@ -1,5 +1,5 @@ mod protocol_version; -use 
crate::version::v6::PROTOCOL_VERSION_6; +use crate::version::v7::PROTOCOL_VERSION_7; pub use protocol_version::*; mod consensus_versions; @@ -18,8 +18,9 @@ pub mod v3; pub mod v4; pub mod v5; pub mod v6; +pub mod v7; pub type ProtocolVersion = u32; -pub const LATEST_VERSION: ProtocolVersion = PROTOCOL_VERSION_6; +pub const LATEST_VERSION: ProtocolVersion = PROTOCOL_VERSION_7; pub const INITIAL_PROTOCOL_VERSION: ProtocolVersion = 1; diff --git a/packages/rs-platform-version/src/version/protocol_version.rs b/packages/rs-platform-version/src/version/protocol_version.rs index f3929b7f56..d793384c72 100644 --- a/packages/rs-platform-version/src/version/protocol_version.rs +++ b/packages/rs-platform-version/src/version/protocol_version.rs @@ -21,6 +21,7 @@ use crate::version::v3::PLATFORM_V3; use crate::version::v4::PLATFORM_V4; use crate::version::v5::PLATFORM_V5; use crate::version::v6::PLATFORM_V6; +use crate::version::v7::PLATFORM_V7; use crate::version::ProtocolVersion; pub use versioned_feature_core::*; @@ -43,6 +44,7 @@ pub const PLATFORM_VERSIONS: &[PlatformVersion] = &[ PLATFORM_V4, PLATFORM_V5, PLATFORM_V6, + PLATFORM_V7, ]; #[cfg(feature = "mock-versions")] @@ -51,7 +53,7 @@ pub static PLATFORM_TEST_VERSIONS: OnceLock> = OnceLock::ne #[cfg(feature = "mock-versions")] const DEFAULT_PLATFORM_TEST_VERSIONS: &[PlatformVersion] = &[TEST_PLATFORM_V2, TEST_PLATFORM_V3]; -pub const LATEST_PLATFORM_VERSION: &PlatformVersion = &PLATFORM_V6; +pub const LATEST_PLATFORM_VERSION: &PlatformVersion = &PLATFORM_V7; pub const DESIRED_PLATFORM_VERSION: &PlatformVersion = LATEST_PLATFORM_VERSION; diff --git a/packages/rs-platform-version/src/version/v7.rs b/packages/rs-platform-version/src/version/v7.rs new file mode 100644 index 0000000000..ee53268997 --- /dev/null +++ b/packages/rs-platform-version/src/version/v7.rs @@ -0,0 +1,64 @@ +use crate::version::consensus_versions::ConsensusVersions; +use 
crate::version::dpp_versions::dpp_asset_lock_versions::v1::DPP_ASSET_LOCK_VERSIONS_V1; +use crate::version::dpp_versions::dpp_contract_versions::v1::CONTRACT_VERSIONS_V1; +use crate::version::dpp_versions::dpp_costs_versions::v1::DPP_COSTS_VERSIONS_V1; +use crate::version::dpp_versions::dpp_document_versions::v1::DOCUMENT_VERSIONS_V1; +use crate::version::dpp_versions::dpp_factory_versions::v1::DPP_FACTORY_VERSIONS_V1; +use crate::version::dpp_versions::dpp_identity_versions::v1::IDENTITY_VERSIONS_V1; +use crate::version::dpp_versions::dpp_method_versions::v1::DPP_METHOD_VERSIONS_V1; +use crate::version::dpp_versions::dpp_state_transition_conversion_versions::v2::STATE_TRANSITION_CONVERSION_VERSIONS_V2; +use crate::version::dpp_versions::dpp_state_transition_method_versions::v1::STATE_TRANSITION_METHOD_VERSIONS_V1; +use crate::version::dpp_versions::dpp_state_transition_serialization_versions::v1::STATE_TRANSITION_SERIALIZATION_VERSIONS_V1; +use crate::version::dpp_versions::dpp_state_transition_versions::v2::STATE_TRANSITION_VERSIONS_V2; +use crate::version::dpp_versions::dpp_validation_versions::v2::DPP_VALIDATION_VERSIONS_V2; +use crate::version::dpp_versions::dpp_voting_versions::v2::VOTING_VERSION_V2; +use crate::version::dpp_versions::DPPVersion; +use crate::version::drive_abci_versions::drive_abci_method_versions::v4::DRIVE_ABCI_METHOD_VERSIONS_V4; +use crate::version::drive_abci_versions::drive_abci_query_versions::v1::DRIVE_ABCI_QUERY_VERSIONS_V1; +use crate::version::drive_abci_versions::drive_abci_structure_versions::v1::DRIVE_ABCI_STRUCTURE_VERSIONS_V1; +use crate::version::drive_abci_versions::drive_abci_validation_versions::v4::DRIVE_ABCI_VALIDATION_VERSIONS_V4; +use crate::version::drive_abci_versions::drive_abci_withdrawal_constants::v2::DRIVE_ABCI_WITHDRAWAL_CONSTANTS_V2; +use crate::version::drive_abci_versions::DriveAbciVersion; +use crate::version::drive_versions::v2::DRIVE_VERSION_V2; +use crate::version::fee::v1::FEE_VERSION1; +use 
crate::version::protocol_version::PlatformVersion; +use crate::version::system_data_contract_versions::v1::SYSTEM_DATA_CONTRACT_VERSIONS_V1; +use crate::version::system_limits::v1::SYSTEM_LIMITS_V1; +use crate::version::ProtocolVersion; + +pub const PROTOCOL_VERSION_7: ProtocolVersion = 7; + +/// This version adds token support. +//todo: make changes +pub const PLATFORM_V7: PlatformVersion = PlatformVersion { + protocol_version: PROTOCOL_VERSION_7, + drive: DRIVE_VERSION_V2, + drive_abci: DriveAbciVersion { + structs: DRIVE_ABCI_STRUCTURE_VERSIONS_V1, + methods: DRIVE_ABCI_METHOD_VERSIONS_V4, + validation_and_processing: DRIVE_ABCI_VALIDATION_VERSIONS_V4, + withdrawal_constants: DRIVE_ABCI_WITHDRAWAL_CONSTANTS_V2, + query: DRIVE_ABCI_QUERY_VERSIONS_V1, + }, + dpp: DPPVersion { + costs: DPP_COSTS_VERSIONS_V1, + validation: DPP_VALIDATION_VERSIONS_V2, + state_transition_serialization_versions: STATE_TRANSITION_SERIALIZATION_VERSIONS_V1, + state_transition_conversion_versions: STATE_TRANSITION_CONVERSION_VERSIONS_V2, + state_transition_method_versions: STATE_TRANSITION_METHOD_VERSIONS_V1, + state_transitions: STATE_TRANSITION_VERSIONS_V2, + contract_versions: CONTRACT_VERSIONS_V1, + document_versions: DOCUMENT_VERSIONS_V1, + identity_versions: IDENTITY_VERSIONS_V1, + voting_versions: VOTING_VERSION_V2, + asset_lock_versions: DPP_ASSET_LOCK_VERSIONS_V1, + methods: DPP_METHOD_VERSIONS_V1, + factory_versions: DPP_FACTORY_VERSIONS_V1, + }, + system_data_contracts: SYSTEM_DATA_CONTRACT_VERSIONS_V1, + fee_version: FEE_VERSION1, + system_limits: SYSTEM_LIMITS_V1, + consensus: ConsensusVersions { + tenderdash_consensus_version: 1, + }, +}; diff --git a/packages/rs-sdk/examples/read_contract.rs b/packages/rs-sdk/examples/read_contract.rs index 7ac2cc333d..75e1e1214d 100644 --- a/packages/rs-sdk/examples/read_contract.rs +++ b/packages/rs-sdk/examples/read_contract.rs @@ -3,7 +3,7 @@ use std::{num::NonZeroUsize, str::FromStr}; use clap::Parser; use 
dash_sdk::{mock::provider::GrpcContextProvider, platform::Fetch, Sdk, SdkBuilder}; use dpp::prelude::{DataContract, Identifier}; -use rs_dapi_client::AddressList; +use rs_dapi_client::{Address, AddressList}; use zeroize::Zeroizing; #[derive(clap::Parser, Debug)] @@ -80,14 +80,14 @@ fn setup_sdk(config: &Config) -> Sdk { // Let's build the Sdk. // First, we need an URI of some Dash Platform DAPI host to connect to and use as seed. - let uri = http::Uri::from_str(&format!( - "http://{}:{}", + let address = Address::from_str(&format!( + "https://{}:{}", config.server_address, config.platform_port )) .expect("parse uri"); // Now, we create the Sdk with the wallet and context provider. - let sdk = SdkBuilder::new(AddressList::from_iter([uri])) + let sdk = SdkBuilder::new(AddressList::from_iter([address])) .build() .expect("cannot build sdk"); diff --git a/packages/rs-sdk/src/platform/fetch.rs b/packages/rs-sdk/src/platform/fetch.rs index 80564fbdf2..7fdf5e1974 100644 --- a/packages/rs-sdk/src/platform/fetch.rs +++ b/packages/rs-sdk/src/platform/fetch.rs @@ -195,7 +195,7 @@ where .dapi_client_settings .override_by(settings.unwrap_or_default()); - retry(settings, fut).await.into_inner() + retry(sdk.address_list(), settings, fut).await.into_inner() } /// Fetch single object from Platform. diff --git a/packages/rs-sdk/src/platform/fetch_many.rs b/packages/rs-sdk/src/platform/fetch_many.rs index 360a3559b3..1fcdb1043a 100644 --- a/packages/rs-sdk/src/platform/fetch_many.rs +++ b/packages/rs-sdk/src/platform/fetch_many.rs @@ -252,7 +252,7 @@ where .dapi_client_settings .override_by(settings.unwrap_or_default()); - retry(settings, fut).await.into_inner() + retry(sdk.address_list(), settings, fut).await.into_inner() } /// Fetch multiple objects from Platform by their identifiers. 
@@ -327,7 +327,7 @@ impl FetchMany for Document { ) -> Result { let document_query: &DocumentQuery = &query.query(sdk.prove())?; - retry(sdk.dapi_client_settings, |settings| async move { + retry(sdk.address_list(), sdk.dapi_client_settings, |settings| async move { let request = document_query.clone(); let ExecutionResponse { diff --git a/packages/rs-sdk/src/platform/fetch_unproved.rs b/packages/rs-sdk/src/platform/fetch_unproved.rs index ac3a682f81..d98d598844 100644 --- a/packages/rs-sdk/src/platform/fetch_unproved.rs +++ b/packages/rs-sdk/src/platform/fetch_unproved.rs @@ -55,7 +55,6 @@ where /// - `settings`: Request settings for the connection to Platform. /// /// ## Returns - /// Returns: /// * `Ok(Some(Self))` when object is found. /// * `Ok(None)` when object is not found. /// * [`Err(Error)`](Error) when an error occurs. @@ -107,7 +106,9 @@ where }; let settings = sdk.dapi_client_settings.override_by(settings); - retry(settings, closure).await.into_inner() + retry(sdk.address_list(), settings, closure) + .await + .into_inner() } } diff --git a/packages/rs-sdk/src/platform/transition.rs b/packages/rs-sdk/src/platform/transition.rs index 4fde48c972..c82a494d2d 100644 --- a/packages/rs-sdk/src/platform/transition.rs +++ b/packages/rs-sdk/src/platform/transition.rs @@ -13,6 +13,7 @@ pub mod transfer_document; mod txid; pub mod update_price_of_document; pub mod vote; +pub mod waitable; pub mod withdraw_from_identity; pub use txid::TxId; diff --git a/packages/rs-sdk/src/platform/transition/broadcast.rs b/packages/rs-sdk/src/platform/transition/broadcast.rs index f41a279b13..f7c3f75d32 100644 --- a/packages/rs-sdk/src/platform/transition/broadcast.rs +++ b/packages/rs-sdk/src/platform/transition/broadcast.rs @@ -52,7 +52,7 @@ impl BroadcastStateTransition for StateTransition { }; // response is empty for a broadcast, result comes from the stream wait for state transition result - retry(retry_settings, factory) + retry(sdk.address_list(), retry_settings, factory) 
.await .into_inner() .map(|_| ()) @@ -122,7 +122,7 @@ impl BroadcastStateTransition for StateTransition { .wrap_to_execution_result(&response) }; - let future = retry(retry_settings, factory); + let future = retry(sdk.address_list(), retry_settings, factory); // run the future with or without timeout, depending on the settings let wait_timeout = settings.and_then(|s| s.wait_timeout); match wait_timeout { diff --git a/packages/rs-sdk/src/platform/transition/purchase_document.rs b/packages/rs-sdk/src/platform/transition/purchase_document.rs index 1de4aeb43f..530c1c6b83 100644 --- a/packages/rs-sdk/src/platform/transition/purchase_document.rs +++ b/packages/rs-sdk/src/platform/transition/purchase_document.rs @@ -1,26 +1,21 @@ -use std::sync::Arc; - -use crate::{Error, Sdk}; - +use super::broadcast::BroadcastStateTransition; +use super::waitable::Waitable; use crate::platform::transition::put_settings::PutSettings; +use crate::{Error, Sdk}; use dpp::data_contract::document_type::accessors::DocumentTypeV0Getters; use dpp::data_contract::document_type::DocumentType; -use dpp::data_contract::DataContract; -use dpp::document::{Document, DocumentV0Getters}; +use dpp::document::Document; use dpp::fee::Credits; use dpp::identity::signer::Signer; use dpp::identity::IdentityPublicKey; use dpp::prelude::Identifier; use dpp::state_transition::documents_batch_transition::methods::v0::DocumentsBatchTransitionMethodsV0; use dpp::state_transition::documents_batch_transition::DocumentsBatchTransition; -use dpp::state_transition::proof_result::StateTransitionProofResult; use dpp::state_transition::StateTransition; -use super::broadcast::BroadcastStateTransition; - #[async_trait::async_trait] /// A trait for purchasing a document on Platform -pub trait PurchaseDocument { +pub trait PurchaseDocument: Waitable { /// Tries to purchase a document on platform /// Setting settings to `None` sets default connection behavior async fn purchase_document( @@ -34,14 +29,6 @@ pub trait 
PurchaseDocument { settings: Option, ) -> Result; - /// Waits for the response of a state transition after it has been broadcast - async fn wait_for_response( - &self, - sdk: &Sdk, - state_transition: StateTransition, - data_contract: Arc, - ) -> Result; - /// Tries to purchase a document on platform and waits for the response async fn purchase_document_and_wait_for_response( &self, @@ -50,8 +37,8 @@ pub trait PurchaseDocument { document_type: DocumentType, purchaser_id: Identifier, identity_public_key: IdentityPublicKey, - data_contract: Arc, signer: &S, + settings: Option, ) -> Result; } @@ -98,30 +85,6 @@ impl PurchaseDocument for Document { Ok(transition) } - async fn wait_for_response( - &self, - sdk: &Sdk, - state_transition: StateTransition, - _data_contract: Arc, - ) -> Result { - let result = state_transition.wait_for_response(sdk, None).await?; - - match result { - StateTransitionProofResult::VerifiedDocuments(mut documents) => { - let document = documents - .remove(self.id_ref()) - .ok_or(Error::InvalidProvedResponse( - "did not prove the sent document".to_string(), - ))? 
- .ok_or(Error::InvalidProvedResponse( - "expected there to actually be a document".to_string(), - ))?; - Ok(document) - } - _ => Err(Error::DapiClientError("proved a non document".to_string())), - } - } - async fn purchase_document_and_wait_for_response( &self, price: Credits, @@ -129,8 +92,8 @@ impl PurchaseDocument for Document { document_type: DocumentType, purchaser_id: Identifier, identity_public_key: IdentityPublicKey, - data_contract: Arc, signer: &S, + settings: Option, ) -> Result { let state_transition = self .purchase_document( @@ -140,18 +103,10 @@ impl PurchaseDocument for Document { purchaser_id, identity_public_key, signer, - None, + settings, ) .await?; - let document = >::wait_for_response( - self, - sdk, - state_transition, - data_contract, - ) - .await?; - - Ok(document) + Self::wait_for_response(sdk, state_transition, settings).await } } diff --git a/packages/rs-sdk/src/platform/transition/put_contract.rs b/packages/rs-sdk/src/platform/transition/put_contract.rs index 9fc0e956ee..9e206f9dd2 100644 --- a/packages/rs-sdk/src/platform/transition/put_contract.rs +++ b/packages/rs-sdk/src/platform/transition/put_contract.rs @@ -10,14 +10,14 @@ use dpp::identity::signer::Signer; use dpp::identity::{IdentityPublicKey, PartialIdentity}; use dpp::state_transition::data_contract_create_transition::methods::DataContractCreateTransitionMethodsV0; use dpp::state_transition::data_contract_create_transition::DataContractCreateTransition; -use dpp::state_transition::proof_result::StateTransitionProofResult; use dpp::state_transition::StateTransition; use super::broadcast::BroadcastStateTransition; +use super::waitable::Waitable; #[async_trait::async_trait] /// A trait for putting a contract to platform -pub trait PutContract { +pub trait PutContract: Waitable { /// Puts a document on platform /// setting settings to `None` sets default connection behavior async fn put_to_platform( @@ -28,19 +28,13 @@ pub trait PutContract { settings: Option, ) -> Result; - /// 
Waits for the response of a state transition after it has been broadcast - async fn wait_for_response( - &self, - sdk: &Sdk, - state_transition: StateTransition, - ) -> Result; - /// Puts a contract on platform and waits for the confirmation proof async fn put_to_platform_and_wait_for_response( &self, sdk: &Sdk, identity_public_key: IdentityPublicKey, signer: &S, + settings: Option, ) -> Result; } @@ -82,34 +76,17 @@ impl PutContract for DataContract { Ok(transition) } - async fn wait_for_response( - &self, - sdk: &Sdk, - state_transition: StateTransition, - ) -> Result { - let result = state_transition.wait_for_response(sdk, None).await?; - - //todo verify - - match result { - StateTransitionProofResult::VerifiedDataContract(data_contract) => Ok(data_contract), - _ => Err(Error::DapiClientError("proved a non document".to_string())), - } - } - async fn put_to_platform_and_wait_for_response( &self, sdk: &Sdk, identity_public_key: IdentityPublicKey, signer: &S, + settings: Option, ) -> Result { let state_transition = self - .put_to_platform(sdk, identity_public_key, signer, None) + .put_to_platform(sdk, identity_public_key, signer, settings) .await?; - let data_contract = - >::wait_for_response(self, sdk, state_transition).await?; - - Ok(data_contract) + Self::wait_for_response(sdk, state_transition, settings).await } } diff --git a/packages/rs-sdk/src/platform/transition/put_document.rs b/packages/rs-sdk/src/platform/transition/put_document.rs index 6e8617f953..3ef5c5c864 100644 --- a/packages/rs-sdk/src/platform/transition/put_document.rs +++ b/packages/rs-sdk/src/platform/transition/put_document.rs @@ -1,21 +1,19 @@ use super::broadcast::BroadcastStateTransition; +use super::waitable::Waitable; use crate::platform::transition::put_settings::PutSettings; use crate::{Error, Sdk}; use dpp::data_contract::document_type::accessors::DocumentTypeV0Getters; use dpp::data_contract::document_type::DocumentType; -use dpp::data_contract::DataContract; use 
dpp::document::{Document, DocumentV0Getters}; use dpp::identity::signer::Signer; use dpp::identity::IdentityPublicKey; use dpp::state_transition::documents_batch_transition::methods::v0::DocumentsBatchTransitionMethodsV0; use dpp::state_transition::documents_batch_transition::DocumentsBatchTransition; -use dpp::state_transition::proof_result::StateTransitionProofResult; use dpp::state_transition::StateTransition; -use std::sync::Arc; #[async_trait::async_trait] /// A trait for putting a document to platform -pub trait PutDocument { +pub trait PutDocument: Waitable { /// Puts a document on platform /// setting settings to `None` sets default connection behavior async fn put_to_platform( @@ -28,14 +26,6 @@ pub trait PutDocument { settings: Option, ) -> Result; - /// Waits for the response of a state transition after it has been broadcast - async fn wait_for_response( - &self, - sdk: &Sdk, - state_transition: StateTransition, - data_contract: Arc, - ) -> Result; - /// Puts an identity on platform and waits for the confirmation proof async fn put_to_platform_and_wait_for_response( &self, @@ -43,8 +33,8 @@ pub trait PutDocument { document_type: DocumentType, document_state_transition_entropy: [u8; 32], identity_public_key: IdentityPublicKey, - data_contract: Arc, signer: &S, + settings: Option, ) -> Result; } @@ -89,38 +79,14 @@ impl PutDocument for Document { Ok(transition) } - async fn wait_for_response( - &self, - sdk: &Sdk, - state_transition: StateTransition, - _data_contract: Arc, - ) -> Result { - let result = state_transition.wait_for_response(sdk, None).await?; - //todo verify - match result { - StateTransitionProofResult::VerifiedDocuments(mut documents) => { - let document = documents - .remove(self.id_ref()) - .ok_or(Error::InvalidProvedResponse( - "did not prove the sent document".to_string(), - ))? 
- .ok_or(Error::InvalidProvedResponse( - "expected there to actually be a document".to_string(), - ))?; - Ok(document) - } - _ => Err(Error::DapiClientError("proved a non document".to_string())), - } - } - async fn put_to_platform_and_wait_for_response( &self, sdk: &Sdk, document_type: DocumentType, document_state_transition_entropy: [u8; 32], identity_public_key: IdentityPublicKey, - _data_contract: Arc, signer: &S, + settings: Option, ) -> Result { let state_transition = self .put_to_platform( @@ -129,24 +95,10 @@ impl PutDocument for Document { document_state_transition_entropy, identity_public_key, signer, - None, + settings, ) .await?; - let result = state_transition.broadcast_and_wait(sdk, None).await?; - match result { - StateTransitionProofResult::VerifiedDocuments(mut documents) => { - let document = documents - .remove(self.id_ref()) - .ok_or(Error::InvalidProvedResponse( - "did not prove the sent document".to_string(), - ))? - .ok_or(Error::InvalidProvedResponse( - "expected there to actually be a document".to_string(), - ))?; - Ok(document) - } - _ => Err(Error::DapiClientError("proved a non document".to_string())), - } + Self::wait_for_response(sdk, state_transition, settings).await } } diff --git a/packages/rs-sdk/src/platform/transition/put_identity.rs b/packages/rs-sdk/src/platform/transition/put_identity.rs index 30276a06a0..ce79b52d81 100644 --- a/packages/rs-sdk/src/platform/transition/put_identity.rs +++ b/packages/rs-sdk/src/platform/transition/put_identity.rs @@ -1,43 +1,39 @@ use crate::platform::transition::broadcast_identity::BroadcastRequestForNewIdentity; -use crate::platform::transition::broadcast_request::BroadcastRequestForStateTransition; -use crate::platform::Fetch; use crate::{Error, Sdk}; -use dapi_grpc::platform::VersionedGrpcResponse; -use dapi_grpc::tonic::Code; +use super::broadcast::BroadcastStateTransition; +use super::put_settings::PutSettings; +use super::waitable::Waitable; use dpp::dashcore::PrivateKey; use 
dpp::identity::signer::Signer; use dpp::prelude::{AssetLockProof, Identity}; -use drive_proof_verifier::error::ContextProviderError; -use drive_proof_verifier::DataContractProvider; +use dpp::state_transition::StateTransition; -use crate::platform::block_info_from_metadata::block_info_from_metadata; -use dpp::state_transition::proof_result::StateTransitionProofResult; -use drive::drive::Drive; -use rs_dapi_client::transport::TransportError; -use rs_dapi_client::{DapiClientError, DapiRequest, IntoInner, RequestSettings}; - -#[async_trait::async_trait] /// A trait for putting an identity to platform -pub trait PutIdentity { - /// Puts an identity on platform +#[async_trait::async_trait] +pub trait PutIdentity: Waitable { + /// Puts an identity on platform. + /// + /// TODO: Discuss if it should not actually consume self, since it is no longer valid (eg. identity id is changed) async fn put_to_platform( &self, sdk: &Sdk, asset_lock_proof: AssetLockProof, asset_lock_proof_private_key: &PrivateKey, signer: &S, - ) -> Result<(), Error>; - /// Puts an identity on platform and waits for the confirmation proof + settings: Option, + ) -> Result; + + /// Puts an identity on platform and waits for the confirmation proof. 
async fn put_to_platform_and_wait_for_response( &self, sdk: &Sdk, asset_lock_proof: AssetLockProof, asset_lock_proof_private_key: &PrivateKey, signer: &S, - ) -> Result; + settings: Option, + ) -> Result; } - #[async_trait::async_trait] impl PutIdentity for Identity { async fn put_to_platform( @@ -46,23 +42,18 @@ impl PutIdentity for Identity { asset_lock_proof: AssetLockProof, asset_lock_proof_private_key: &PrivateKey, signer: &S, - ) -> Result<(), Error> { - let (_, request) = self.broadcast_request_for_new_identity( + settings: Option, + ) -> Result { + let (state_transition, _) = self.broadcast_request_for_new_identity( asset_lock_proof, asset_lock_proof_private_key, signer, sdk.version(), )?; - request - .clone() - .execute(sdk, RequestSettings::default()) - .await // TODO: We need better way to handle execution errors - .into_inner()?; - // response is empty for a broadcast, result comes from the stream wait for state transition result - - Ok(()) + state_transition.broadcast(sdk, settings).await?; + Ok(state_transition) } async fn put_to_platform_and_wait_for_response( @@ -71,68 +62,18 @@ impl PutIdentity for Identity { asset_lock_proof: AssetLockProof, asset_lock_proof_private_key: &PrivateKey, signer: &S, + settings: Option, ) -> Result { - let identity_id = asset_lock_proof.create_identifier()?; - let (state_transition, request) = self.broadcast_request_for_new_identity( - asset_lock_proof, - asset_lock_proof_private_key, - signer, - sdk.version(), - )?; - - let response_result = request - .clone() - .execute(sdk, RequestSettings::default()) - .await - .into_inner(); - - match response_result { - Ok(_) => {} - //todo make this more reliable - Err(DapiClientError::Transport(TransportError::Grpc(te))) - if te.code() == Code::AlreadyExists => - { - tracing::debug!( - ?identity_id, - "attempt to create identity that already exists" - ); - let identity = Identity::fetch(sdk, identity_id).await?; - return identity.ok_or(Error::DapiClientError( - "identity was 
proved to not exist but was said to exist".to_string(), - )); - } - Err(e) => return Err(e.into()), - } - - let request = state_transition.wait_for_state_transition_result_request()?; - // TODO: Implement retry logic - - let response = request - .execute(sdk, RequestSettings::default()) - .await - .into_inner()?; - - let block_info = block_info_from_metadata(response.metadata()?)?; - let proof = response.proof_owned()?; - let context_provider = - sdk.context_provider() - .ok_or(Error::from(ContextProviderError::Config( - "Context provider not initialized".to_string(), - )))?; - - let (_, result) = Drive::verify_state_transition_was_executed_with_proof( - &state_transition, - &block_info, - proof.grovedb_proof.as_slice(), - &context_provider.as_contract_lookup_fn(), - sdk.version(), - )?; - - //todo verify - - match result { - StateTransitionProofResult::VerifiedIdentity(identity) => Ok(identity), - _ => Err(Error::DapiClientError("proved a non identity".to_string())), - } + let state_transition = self + .put_to_platform( + sdk, + asset_lock_proof, + asset_lock_proof_private_key, + signer, + settings, + ) + .await?; + + Self::wait_for_response(sdk, state_transition, settings).await } } diff --git a/packages/rs-sdk/src/platform/transition/top_up_identity.rs b/packages/rs-sdk/src/platform/transition/top_up_identity.rs index c43d8a9f19..10998b6ae7 100644 --- a/packages/rs-sdk/src/platform/transition/top_up_identity.rs +++ b/packages/rs-sdk/src/platform/transition/top_up_identity.rs @@ -1,26 +1,22 @@ -use crate::platform::block_info_from_metadata::block_info_from_metadata; -use crate::platform::transition::broadcast_request::BroadcastRequestForStateTransition; +use super::broadcast::BroadcastStateTransition; +use super::put_settings::PutSettings; +use super::waitable::Waitable; use crate::{Error, Sdk}; -use dapi_grpc::platform::VersionedGrpcResponse; use dpp::dashcore::PrivateKey; -use dpp::identity::Identity; +use dpp::identity::{Identity, PartialIdentity}; use 
dpp::prelude::{AssetLockProof, UserFeeIncrease}; use dpp::state_transition::identity_topup_transition::methods::IdentityTopUpTransitionMethodsV0; use dpp::state_transition::identity_topup_transition::IdentityTopUpTransition; -use dpp::state_transition::proof_result::StateTransitionProofResult; -use drive::drive::Drive; -use drive_proof_verifier::error::ContextProviderError; -use drive_proof_verifier::DataContractProvider; -use rs_dapi_client::{DapiRequest, IntoInner, RequestSettings}; #[async_trait::async_trait] -pub trait TopUpIdentity { +pub trait TopUpIdentity: Waitable { async fn top_up_identity( &self, sdk: &Sdk, asset_lock_proof: AssetLockProof, asset_lock_proof_private_key: &PrivateKey, user_fee_increase: Option, + settings: Option, ) -> Result; } @@ -32,6 +28,7 @@ impl TopUpIdentity for Identity { asset_lock_proof: AssetLockProof, asset_lock_proof_private_key: &PrivateKey, user_fee_increase: Option, + settings: Option, ) -> Result { let state_transition = IdentityTopUpTransition::try_from_identity( self, @@ -41,46 +38,10 @@ impl TopUpIdentity for Identity { sdk.version(), None, )?; + let identity: PartialIdentity = state_transition.broadcast_and_wait(sdk, settings).await?; - let request = state_transition.broadcast_request_for_state_transition()?; - - request - .clone() - .execute(sdk, RequestSettings::default()) - .await // TODO: We need better way to handle execution errors - .into_inner()?; - - let request = state_transition.wait_for_state_transition_result_request()?; - // TODO: Implement retry logic in wait for state transition result - let response = request - .execute(sdk, RequestSettings::default()) - .await - .into_inner()?; - - let block_info = block_info_from_metadata(response.metadata()?)?; - - let proof = response.proof_owned()?; - let context_provider = - sdk.context_provider() - .ok_or(Error::from(ContextProviderError::Config( - "Context provider not initialized".to_string(), - )))?; - - let (_, result) = 
Drive::verify_state_transition_was_executed_with_proof( - &state_transition, - &block_info, - proof.grovedb_proof.as_slice(), - &context_provider.as_contract_lookup_fn(), - sdk.version(), - )?; - - match result { - StateTransitionProofResult::VerifiedPartialIdentity(identity) => { - identity.balance.ok_or(Error::DapiClientError( - "expected an identity balance".to_string(), - )) - } - _ => Err(Error::DapiClientError("proved a non identity".to_string())), - } + identity.balance.ok_or(Error::DapiClientError( + "expected an identity balance".to_string(), + )) } } diff --git a/packages/rs-sdk/src/platform/transition/transfer.rs b/packages/rs-sdk/src/platform/transition/transfer.rs index 6d932c5abb..7bd7ddd364 100644 --- a/packages/rs-sdk/src/platform/transition/transfer.rs +++ b/packages/rs-sdk/src/platform/transition/transfer.rs @@ -5,13 +5,14 @@ use crate::platform::transition::broadcast::BroadcastStateTransition; use crate::platform::transition::put_settings::PutSettings; use crate::{Error, Sdk}; use dpp::identity::signer::Signer; -use dpp::identity::{Identity, IdentityPublicKey}; +use dpp::identity::{Identity, IdentityPublicKey, PartialIdentity}; use dpp::state_transition::identity_credit_transfer_transition::methods::IdentityCreditTransferTransitionMethodsV0; use dpp::state_transition::identity_credit_transfer_transition::IdentityCreditTransferTransition; -use dpp::state_transition::proof_result::StateTransitionProofResult; + +use super::waitable::Waitable; #[async_trait::async_trait] -pub trait TransferToIdentity { +pub trait TransferToIdentity: Waitable { /// Function to transfer credits from an identity to another identity. Returns the final /// identity balance. 
/// @@ -59,15 +60,10 @@ impl TransferToIdentity for Identity { None, )?; - let result = state_transition.broadcast_and_wait(sdk, settings).await?; + let identity: PartialIdentity = state_transition.broadcast_and_wait(sdk, settings).await?; - match result { - StateTransitionProofResult::VerifiedPartialIdentity(identity) => { - identity.balance.ok_or(Error::DapiClientError( - "expected an identity balance after transfer".to_string(), - )) - } - _ => Err(Error::DapiClientError("proved a non identity".to_string())), - } + identity.balance.ok_or(Error::DapiClientError( + "expected an identity balance after transfer".to_string(), + )) } } diff --git a/packages/rs-sdk/src/platform/transition/transfer_document.rs b/packages/rs-sdk/src/platform/transition/transfer_document.rs index a64c76cb95..2106141ae3 100644 --- a/packages/rs-sdk/src/platform/transition/transfer_document.rs +++ b/packages/rs-sdk/src/platform/transition/transfer_document.rs @@ -1,28 +1,21 @@ +use super::waitable::Waitable; use crate::platform::transition::broadcast_request::BroadcastRequestForStateTransition; -use std::sync::Arc; - -use crate::{Error, Sdk}; - -use crate::platform::block_info_from_metadata::block_info_from_metadata; use crate::platform::transition::put_settings::PutSettings; use crate::platform::Identifier; -use dapi_grpc::platform::VersionedGrpcResponse; +use crate::{Error, Sdk}; use dpp::data_contract::document_type::accessors::DocumentTypeV0Getters; use dpp::data_contract::document_type::DocumentType; -use dpp::data_contract::DataContract; use dpp::document::{Document, DocumentV0Getters}; use dpp::identity::signer::Signer; use dpp::identity::IdentityPublicKey; use dpp::state_transition::documents_batch_transition::methods::v0::DocumentsBatchTransitionMethodsV0; use dpp::state_transition::documents_batch_transition::DocumentsBatchTransition; -use dpp::state_transition::proof_result::StateTransitionProofResult; use dpp::state_transition::StateTransition; -use drive::drive::Drive; -use 
rs_dapi_client::{DapiRequest, IntoInner, RequestSettings}; +use rs_dapi_client::{DapiRequest, IntoInner}; #[async_trait::async_trait] /// A trait for transferring a document on Platform -pub trait TransferDocument { +pub trait TransferDocument: Waitable { /// Transfers a document on platform /// Setting settings to `None` sets default connection behavior async fn transfer_document_to_identity( @@ -35,14 +28,6 @@ pub trait TransferDocument { settings: Option, ) -> Result; - /// Waits for the response of a state transition after it has been broadcast - async fn wait_for_response( - &self, - sdk: &Sdk, - state_transition: StateTransition, - data_contract: Arc, - ) -> Result; - /// Transfers a document on platform and waits for the response async fn transfer_document_to_identity_and_wait_for_response( &self, @@ -50,8 +35,8 @@ pub trait TransferDocument { sdk: &Sdk, document_type: DocumentType, identity_public_key: IdentityPublicKey, - data_contract: Arc, signer: &S, + settings: Option, ) -> Result; } @@ -104,55 +89,14 @@ impl TransferDocument for Document { Ok(transition) } - async fn wait_for_response( - &self, - sdk: &Sdk, - state_transition: StateTransition, - data_contract: Arc, - ) -> Result { - let request = state_transition.wait_for_state_transition_result_request()?; - - let response = request - .execute(sdk, RequestSettings::default()) - .await - .into_inner()?; - - let block_info = block_info_from_metadata(response.metadata()?)?; - - let proof = response.proof_owned()?; - - let (_, result) = Drive::verify_state_transition_was_executed_with_proof( - &state_transition, - &block_info, - proof.grovedb_proof.as_slice(), - &|_| Ok(Some(data_contract.clone())), - sdk.version(), - )?; - - match result { - StateTransitionProofResult::VerifiedDocuments(mut documents) => { - let document = documents - .remove(self.id_ref()) - .ok_or(Error::InvalidProvedResponse( - "did not prove the sent document".to_string(), - ))? 
- .ok_or(Error::InvalidProvedResponse( - "expected there to actually be a document".to_string(), - ))?; - Ok(document) - } - _ => Err(Error::DapiClientError("proved a non document".to_string())), - } - } - async fn transfer_document_to_identity_and_wait_for_response( &self, recipient_id: Identifier, sdk: &Sdk, document_type: DocumentType, identity_public_key: IdentityPublicKey, - data_contract: Arc, signer: &S, + settings: Option, ) -> Result { let state_transition = self .transfer_document_to_identity( @@ -161,18 +105,10 @@ impl TransferDocument for Document { document_type, identity_public_key, signer, - None, + settings, ) .await?; - let document = >::wait_for_response( - self, - sdk, - state_transition, - data_contract, - ) - .await?; - - Ok(document) + Self::wait_for_response(sdk, state_transition, settings).await } } diff --git a/packages/rs-sdk/src/platform/transition/update_price_of_document.rs b/packages/rs-sdk/src/platform/transition/update_price_of_document.rs index 0f331cde5d..99a5642bf9 100644 --- a/packages/rs-sdk/src/platform/transition/update_price_of_document.rs +++ b/packages/rs-sdk/src/platform/transition/update_price_of_document.rs @@ -1,28 +1,21 @@ -use crate::platform::transition::broadcast_request::BroadcastRequestForStateTransition; -use std::sync::Arc; - use crate::{Error, Sdk}; -use crate::platform::block_info_from_metadata::block_info_from_metadata; +use super::broadcast::BroadcastStateTransition; +use super::waitable::Waitable; use crate::platform::transition::put_settings::PutSettings; -use dapi_grpc::platform::VersionedGrpcResponse; use dpp::data_contract::document_type::accessors::DocumentTypeV0Getters; use dpp::data_contract::document_type::DocumentType; -use dpp::data_contract::DataContract; use dpp::document::{Document, DocumentV0Getters}; use dpp::fee::Credits; use dpp::identity::signer::Signer; use dpp::identity::IdentityPublicKey; use 
dpp::state_transition::documents_batch_transition::methods::v0::DocumentsBatchTransitionMethodsV0; use dpp::state_transition::documents_batch_transition::DocumentsBatchTransition; -use dpp::state_transition::proof_result::StateTransitionProofResult; use dpp::state_transition::StateTransition; -use drive::drive::Drive; -use rs_dapi_client::{DapiRequest, IntoInner, RequestSettings}; #[async_trait::async_trait] /// A trait for updating the price of a document on Platform -pub trait UpdatePriceOfDocument { +pub trait UpdatePriceOfDocument: Waitable { /// Updates the price of a document on platform /// Setting settings to `None` sets default connection behavior async fn update_price_of_document( @@ -35,14 +28,6 @@ pub trait UpdatePriceOfDocument { settings: Option, ) -> Result; - /// Waits for the response of a state transition after it has been broadcast - async fn wait_for_response( - &self, - sdk: &Sdk, - state_transition: StateTransition, - data_contract: Arc, - ) -> Result; - /// Updates the price of a document on platform and waits for the response async fn update_price_of_document_and_wait_for_response( &self, @@ -50,8 +35,8 @@ pub trait UpdatePriceOfDocument { sdk: &Sdk, document_type: DocumentType, identity_public_key: IdentityPublicKey, - data_contract: Arc, signer: &S, + settings: Option, ) -> Result; } @@ -92,81 +77,24 @@ impl UpdatePriceOfDocument for Document { None, )?; - let request = transition.broadcast_request_for_state_transition()?; - - request - .clone() - .execute(sdk, settings.request_settings) - .await // TODO: We need better way to handle execution errors - .into_inner()?; - // response is empty for a broadcast, result comes from the stream wait for state transition result - + transition.broadcast(sdk, Some(settings)).await?; Ok(transition) } - async fn wait_for_response( - &self, - sdk: &Sdk, - state_transition: StateTransition, - data_contract: Arc, - ) -> Result { - let request = state_transition.wait_for_state_transition_result_request()?; 
- // TODO: Implement retry logic - let response = request - .execute(sdk, RequestSettings::default()) - .await - .into_inner()?; - - let block_info = block_info_from_metadata(response.metadata()?)?; - - let proof = response.proof_owned()?; - - let (_, result) = Drive::verify_state_transition_was_executed_with_proof( - &state_transition, - &block_info, - proof.grovedb_proof.as_slice(), - &|_| Ok(Some(data_contract.clone())), - sdk.version(), - )?; - - match result { - StateTransitionProofResult::VerifiedDocuments(mut documents) => { - let document = documents - .remove(self.id_ref()) - .ok_or(Error::InvalidProvedResponse( - "did not prove the sent document".to_string(), - ))? - .ok_or(Error::InvalidProvedResponse( - "expected there to actually be a document".to_string(), - ))?; - Ok(document) - } - _ => Err(Error::DapiClientError("proved a non document".to_string())), - } - } - async fn update_price_of_document_and_wait_for_response( &self, price: Credits, sdk: &Sdk, document_type: DocumentType, identity_public_key: IdentityPublicKey, - data_contract: Arc, signer: &S, + settings: Option, ) -> Result { let state_transition = self .update_price_of_document(price, sdk, document_type, identity_public_key, signer, None) .await?; - let document = >::wait_for_response( - self, - sdk, - state_transition, - data_contract, - ) - .await?; - - Ok(document) + Self::wait_for_response(sdk, state_transition, settings).await } } diff --git a/packages/rs-sdk/src/platform/transition/vote.rs b/packages/rs-sdk/src/platform/transition/vote.rs index 5666b8b42d..3734e892f2 100644 --- a/packages/rs-sdk/src/platform/transition/vote.rs +++ b/packages/rs-sdk/src/platform/transition/vote.rs @@ -1,10 +1,8 @@ -use crate::platform::block_info_from_metadata::block_info_from_metadata; use crate::platform::query::VoteQuery; use crate::platform::transition::broadcast_request::BroadcastRequestForStateTransition; use crate::platform::transition::put_settings::PutSettings; use crate::platform::Fetch; use 
crate::{Error, Sdk}; -use dapi_grpc::platform::VersionedGrpcResponse; use dpp::identifier::MasternodeIdentifiers; use dpp::identity::hash::IdentityPublicKeyHashMethodsV0; use dpp::identity::signer::Signer; @@ -12,16 +10,15 @@ use dpp::identity::IdentityPublicKey; use dpp::prelude::Identifier; use dpp::state_transition::masternode_vote_transition::methods::MasternodeVoteTransitionMethodsV0; use dpp::state_transition::masternode_vote_transition::MasternodeVoteTransition; -use dpp::state_transition::proof_result::StateTransitionProofResult; use dpp::voting::votes::resource_vote::accessors::v0::ResourceVoteGettersV0; use dpp::voting::votes::Vote; -use drive::drive::Drive; -use drive_proof_verifier::{error::ContextProviderError, DataContractProvider}; use rs_dapi_client::{DapiRequest, IntoInner}; +use super::waitable::Waitable; + #[async_trait::async_trait] /// A trait for putting a vote on platform -pub trait PutVote { +pub trait PutVote: Waitable { /// Puts an identity on platform async fn put_to_platform( &self, @@ -129,37 +126,7 @@ impl PutVote for Vote { } } } - - let request = masternode_vote_transition.wait_for_state_transition_result_request()?; - let response = request - .execute(sdk, settings.request_settings) - .await - .into_inner()?; - - let block_info = block_info_from_metadata(response.metadata()?)?; - let proof = response.proof_owned()?; - let context_provider = - sdk.context_provider() - .ok_or(Error::from(ContextProviderError::Config( - "Context provider not initialized".to_string(), - )))?; - - let (_, result) = Drive::verify_state_transition_was_executed_with_proof( - &masternode_vote_transition, - &block_info, - proof.grovedb_proof.as_slice(), - &context_provider.as_contract_lookup_fn(), - sdk.version(), - )?; - - //todo verify - - match result { - StateTransitionProofResult::VerifiedMasternodeVote(vote) => Ok(vote), - _ => Err(Error::DapiClientError( - "proved something that was not a vote".to_string(), - )), - } + Self::wait_for_response(sdk, 
masternode_vote_transition, Some(settings)).await } } diff --git a/packages/rs-sdk/src/platform/transition/waitable.rs b/packages/rs-sdk/src/platform/transition/waitable.rs new file mode 100644 index 0000000000..a63acb0949 --- /dev/null +++ b/packages/rs-sdk/src/platform/transition/waitable.rs @@ -0,0 +1,131 @@ +use std::collections::BTreeMap; + +use super::broadcast::BroadcastStateTransition; +use super::put_settings::PutSettings; +use crate::platform::Fetch; +use crate::Error; +use crate::Sdk; +use dpp::document::Document; +use dpp::prelude::{DataContract, Identifier, Identity}; +use dpp::state_transition::identity_create_transition::accessors::IdentityCreateTransitionAccessorsV0; +use dpp::state_transition::StateTransition; +use dpp::state_transition::StateTransitionLike; +use dpp::voting::votes::Vote; +use dpp::ProtocolError; + +/// Waitable trait provides a way to wait for a response of a state transition after it has been broadcast and +/// to receive altered objects. +/// +/// This is a simple convenience trait wrapping the [`BroadcastStateTransition::wait_for_response`] method.
+#[async_trait::async_trait] +pub trait Waitable: Sized { + async fn wait_for_response( + sdk: &Sdk, + state_transition: StateTransition, + settings: Option, + ) -> Result; +} +#[async_trait::async_trait] +impl Waitable for DataContract { + async fn wait_for_response( + sdk: &Sdk, + state_transition: StateTransition, + settings: Option, + ) -> Result { + state_transition.wait_for_response(sdk, settings).await + } +} + +#[async_trait::async_trait] +impl Waitable for Document { + async fn wait_for_response( + sdk: &Sdk, + state_transition: StateTransition, + settings: Option, + ) -> Result { + let doc_id = if let StateTransition::DocumentsBatch(transition) = &state_transition { + let ids = transition.modified_data_ids(); + if ids.len() != 1 { + return Err(Error::Protocol( + dpp::ProtocolError::InvalidStateTransitionType(format!( + "expected state transition with exactly one document, got {}", + ids.into_iter() + .map(|id| id + .to_string(dpp::platform_value::string_encoding::Encoding::Base58)) + .collect::>() + .join(", ") + )), + )); + } + ids[0] + } else { + return Err(Error::Protocol(ProtocolError::InvalidStateTransitionType( + format!( + "expected state transition to be a DocumentsBatchTransition, got {}", + state_transition.name() + ), + ))); + }; + + let mut documents: BTreeMap> = + state_transition.wait_for_response(sdk, settings).await?; + + let document: Document = documents + .remove(&doc_id) + .ok_or(Error::InvalidProvedResponse( + "did not prove the sent document".to_string(), + ))? 
+ .ok_or(Error::InvalidProvedResponse( + "expected there to actually be a document".to_string(), + ))?; + + Ok(document) + } +} + +#[async_trait::async_trait] +impl Waitable for Identity { + async fn wait_for_response( + sdk: &Sdk, + state_transition: StateTransition, + settings: Option, + ) -> Result { + let result: Result = state_transition.wait_for_response(sdk, settings).await; + + match result { + Ok(identity) => Ok(identity), + // TODO: We need to refactor sdk Error to be able to retrieve gRPC error code and identify conflicts + Err(Error::AlreadyExists(_)) => { + let identity_id = if let StateTransition::IdentityCreate(st) = state_transition { + st.identity_id() + } else { + return Err(Error::Generic(format!( + "expected identity create state transition, got {:?}", + state_transition.name() + ))); + }; + + tracing::debug!( + ?identity_id, + "attempt to create identity that already exists" + ); + let identity = Identity::fetch(sdk, identity_id).await?; + identity.ok_or(Error::DapiClientError( + "identity was proved to not exist but was said to exist".to_string(), + )) + } + Err(e) => Err(e), + } + } +} + +#[async_trait::async_trait] +impl Waitable for Vote { + async fn wait_for_response( + sdk: &Sdk, + state_transition: StateTransition, + settings: Option, + ) -> Result { + state_transition.wait_for_response(sdk, settings).await + } +} diff --git a/packages/rs-sdk/src/platform/types/evonode.rs b/packages/rs-sdk/src/platform/types/evonode.rs index 70bbabee61..2f91e17106 100644 --- a/packages/rs-sdk/src/platform/types/evonode.rs +++ b/packages/rs-sdk/src/platform/types/evonode.rs @@ -25,8 +25,8 @@ use std::fmt::Debug; /// use futures::executor::block_on; /// /// let sdk = Sdk::new_mock(); -/// let uri: http::Uri = "http://127.0.0.1:1".parse().unwrap(); -/// let node = EvoNode::new(uri.into()); +/// let address = "http://127.0.0.1:1".parse().expect("valid address"); +/// let node = EvoNode::new(address); /// let status = 
block_on(EvoNodeStatus::fetch_unproved(&sdk, node)).unwrap(); /// ``` diff --git a/packages/rs-sdk/src/sdk.rs b/packages/rs-sdk/src/sdk.rs index 3fd570e206..c823df2eae 100644 --- a/packages/rs-sdk/src/sdk.rs +++ b/packages/rs-sdk/src/sdk.rs @@ -50,6 +50,16 @@ pub const DEFAULT_QUORUM_PUBLIC_KEYS_CACHE_SIZE: usize = 100; /// The default identity nonce stale time in seconds pub const DEFAULT_IDENTITY_NONCE_STALE_TIME_S: u64 = 1200; //20 mins +/// The default request settings for the SDK, used when the user does not provide any. +/// +/// Use [SdkBuilder::with_settings] to set custom settings. +const DEFAULT_REQUEST_SETTINGS: RequestSettings = RequestSettings { + retries: Some(3), + timeout: None, + ban_failed_address: None, + connect_timeout: None, +}; + /// a type to represent staleness in seconds pub type StalenessInSeconds = u64; @@ -184,7 +194,7 @@ enum SdkInstance { dapi: Arc>, /// Mock SDK implementation processing mock expectations and responses. mock: Arc>, - + address_list: AddressList, /// Platform version configured for this Sdk version: &'static PlatformVersion, }, @@ -554,19 +564,11 @@ impl Sdk { } /// Return the [DapiClient] address list - pub fn address_list(&self) -> Result { + pub fn address_list(&self) -> &AddressList { match &self.inner { - SdkInstance::Dapi { dapi, version: _ } => { - let address_list_arc = dapi.address_list(); - let address_list_lock = address_list_arc - .read() - .map_err(|e| format!("Failed to read address list: {e}"))?; - Ok(address_list_lock.clone()) - } + SdkInstance::Dapi { dapi, .. } => dapi.address_list(), #[cfg(feature = "mocks")] - SdkInstance::Mock { .. } => { - unimplemented!("mock Sdk does not have address list") - } + SdkInstance::Mock { address_list, .. } => address_list, } } } @@ -705,7 +707,7 @@ pub struct SdkBuilder { /// /// If `None`, a mock client will be created. 
addresses: Option, - settings: RequestSettings, + settings: Option, network: Network, @@ -755,7 +757,7 @@ impl Default for SdkBuilder { fn default() -> Self { Self { addresses: None, - settings: RequestSettings::default(), + settings: None, network: Network::Dash, core_ip: "".to_string(), core_port: 0, @@ -836,7 +838,7 @@ impl SdkBuilder { /// /// See [`RequestSettings`] for more information. pub fn with_settings(mut self, settings: RequestSettings) -> Self { - self.settings = settings; + self.settings = Some(settings); self } @@ -952,17 +954,22 @@ impl SdkBuilder { pub fn build(self) -> Result { PlatformVersion::set_current(self.version); + let dapi_client_settings = match self.settings { + Some(settings) => DEFAULT_REQUEST_SETTINGS.override_by(settings), + None => DEFAULT_REQUEST_SETTINGS, + }; + let sdk= match self.addresses { // non-mock mode Some(addresses) => { - let dapi = DapiClient::new(addresses, self.settings); + let dapi = DapiClient::new(addresses,dapi_client_settings); #[cfg(feature = "mocks")] let dapi = dapi.dump_dir(self.dump_dir.clone()); #[allow(unused_mut)] // needs to be mutable for #[cfg(feature = "mocks")] let mut sdk= Sdk{ network: self.network, - dapi_client_settings: self.settings, + dapi_client_settings, inner:SdkInstance::Dapi { dapi, version:self.version }, proofs:self.proofs, context_provider: ArcSwapOption::new( self.context_provider.map(Arc::new)), @@ -1025,11 +1032,12 @@ impl SdkBuilder { let mock_sdk = Arc::new(Mutex::new(mock_sdk)); let sdk= Sdk { network: self.network, - dapi_client_settings: self.settings, + dapi_client_settings, inner:SdkInstance::Mock { mock:mock_sdk.clone(), dapi, - version:self.version, + address_list: AddressList::new(), + version: self.version, }, dump_dir: self.dump_dir.clone(), proofs:self.proofs, diff --git a/packages/rs-sdk/src/sync.rs b/packages/rs-sdk/src/sync.rs index 38a878e174..5f5d266669 100644 --- a/packages/rs-sdk/src/sync.rs +++ b/packages/rs-sdk/src/sync.rs @@ -6,13 +6,17 @@ use 
arc_swap::ArcSwap; use drive_proof_verifier::error::ContextProviderError; -use rs_dapi_client::{CanRetry, ExecutionResult, RequestSettings}; +use rs_dapi_client::{ + update_address_ban_status, AddressList, CanRetry, ExecutionResult, RequestSettings, +}; +use std::fmt::Display; use std::{ fmt::Debug, future::Future, sync::{mpsc::SendError, Arc}, }; use tokio::{runtime::TryCurrentError, sync::Mutex}; + #[derive(Debug, thiserror::Error)] pub enum AsyncError { /// Not running inside tokio runtime @@ -110,6 +114,7 @@ async fn worker( /// /// ## Parameters /// +/// - `address_list` - list of addresses to be used for the requests. /// - `settings` - global settings with any request-specific settings overrides applied. /// - `future_factory_fn` - closure that returns a future that should be retried. It should take [`RequestSettings`] as /// an argument and return [`ExecutionResult`]. @@ -138,8 +143,9 @@ async fn worker( /// } /// #[tokio::main] /// async fn main() { +/// let address_list = rs_dapi_client::AddressList::default(); /// let global_settings = RequestSettings::default(); -/// dash_sdk::sync::retry(global_settings, retry_test_function).await.expect_err("should fail"); +/// dash_sdk::sync::retry(&address_list, global_settings, retry_test_function).await.expect_err("should fail"); /// } /// ``` /// @@ -154,13 +160,14 @@ async fn worker( /// /// - [`::backon`] crate that is used by this function. 
pub async fn retry( + address_list: &AddressList, settings: RequestSettings, future_factory_fn: FutureFactoryFn, ) -> ExecutionResult where Fut: Future>, FutureFactoryFn: FnMut(RequestSettings) -> Fut, - E: CanRetry + Debug, + E: CanRetry + Display + Debug, { let max_retries = settings.retries.unwrap_or_default(); @@ -187,21 +194,26 @@ where async move { let settings = closure_settings.load_full().clone(); let mut func = inner_fn.lock().await; - (*func)(*settings).await + let result = (*func)(*settings).await; + + // Ban or unban the address based on the result + update_address_ban_status(address_list, &result, &settings.finalize()); + + result } }; - let result= ::backon::Retryable::retry(closure,backoff_strategy) + let result = ::backon::Retryable::retry(closure, backoff_strategy) .when(|e| { if e.can_retry() { - // requests sent for current execution attempt; + // requests sent for current execution attempt; let requests_sent = e.retries + 1; - // requests sent in all preceeding attempts; user expects `settings.retries +1` + // requests sent in all preceeding attempts; user expects `settings.retries +1` retries += requests_sent; let all_requests_sent = retries; - if all_requests_sent <=max_retries { // we account for for initial request + if all_requests_sent <= max_retries { // we account for initial request tracing::warn!(retry = all_requests_sent, max_retries, error=?e, "retrying request"); let new_settings = RequestSettings { retries: Some(max_retries - all_requests_sent), // limit num of retries for lower layer @@ -231,6 +243,7 @@ where #[cfg(test)] mod test { use super::*; + use derive_more::Display; use http::Uri; use rs_dapi_client::ExecutionError; use std::{ @@ -314,7 +327,7 @@ mod test { } } - #[derive(Debug)] + #[derive(Debug, Display)] enum MockError { Generic, } @@ -342,7 +355,7 @@ mod test { Err(ExecutionError { inner: MockError::Generic, retries, - address: Some(Uri::from_static("http://localhost").into()), + address: 
Some("http://localhost".parse().expect("valid address")), }) } @@ -352,6 +365,8 @@ mod test { for _ in 0..1 { let counter = Arc::new(AtomicUsize::new(0)); + let address_list = AddressList::default(); + // we retry 5 times, and expect 5 retries + 1 initial request let mut global_settings = RequestSettings::default(); global_settings.retries = Some(expected_requests - 1); @@ -361,7 +376,7 @@ mod test { retry_test_function(s, counter) }; - retry(global_settings, closure) + retry(&address_list, global_settings, closure) .await .expect_err("should fail"); diff --git a/packages/rs-sdk/tests/fetch/config.rs b/packages/rs-sdk/tests/fetch/config.rs index c2f8edbc4e..f55484f5ce 100644 --- a/packages/rs-sdk/tests/fetch/config.rs +++ b/packages/rs-sdk/tests/fetch/config.rs @@ -8,7 +8,7 @@ use dpp::{ dashcore::{hashes::Hash, ProTxHash}, prelude::Identifier, }; -use rs_dapi_client::AddressList; +use rs_dapi_client::{Address, AddressList}; use serde::Deserialize; use std::{path::PathBuf, str::FromStr}; use zeroize::Zeroizing; @@ -131,9 +131,12 @@ impl Config { false => "http", }; - let address: String = format!("{}://{}:{}", scheme, self.platform_host, self.platform_port); + let address: Address = + format!("{}://{}:{}", scheme, self.platform_host, self.platform_port) + .parse() + .expect("valid address"); - AddressList::from_iter(vec![http::Uri::from_str(&address).expect("valid uri")]) + AddressList::from_iter([address]) } /// Create new SDK instance diff --git a/packages/rs-sdk/tests/fetch/evonode.rs b/packages/rs-sdk/tests/fetch/evonode.rs index 0d35d5be9f..b2521ba864 100644 --- a/packages/rs-sdk/tests/fetch/evonode.rs +++ b/packages/rs-sdk/tests/fetch/evonode.rs @@ -5,6 +5,7 @@ use dash_sdk::platform::{types::evonode::EvoNode, FetchUnproved}; use dpp::dashcore::{hashes::Hash, ProTxHash}; use drive_proof_verifier::types::EvoNodeStatus; use http::Uri; +use rs_dapi_client::Address; use std::time::Duration; /// Given some existing evonode URIs, WHEN we connect to them, THEN we 
get status. use tokio::time::timeout; @@ -16,9 +17,7 @@ async fn test_evonode_status() { let cfg = Config::new(); let sdk = cfg.setup_api("test_evonode_status").await; - let addresses = cfg.address_list(); - - for address in addresses { + for (address, _status) in cfg.address_list() { let node = EvoNode::new(address.clone()); match timeout( Duration::from_secs(3), @@ -33,8 +32,9 @@ async fn test_evonode_status() { status.chain.latest_block_height > 0, "latest block height must be positive" ); - assert!( - status.node.pro_tx_hash.unwrap_or_default().len() == ProTxHash::LEN, + assert_eq!( + status.node.pro_tx_hash.unwrap_or_default().len(), + ProTxHash::LEN, "latest block hash must be non-empty" ); // Add more specific assertions based on expected status properties @@ -61,11 +61,11 @@ async fn test_evonode_status_refused() { let cfg = Config::new(); let sdk = cfg.setup_api("test_evonode_status_refused").await; - let uri: Uri = "http://127.0.0.1:1".parse().unwrap(); + let address: Address = "http://127.0.0.1:1".parse().expect("valid address"); - let node = EvoNode::new(uri.clone().into()); + let node = EvoNode::new(address.clone()); let result = EvoNodeStatus::fetch_unproved(&sdk, node).await; - tracing::debug!(?result, ?uri, "evonode status"); + tracing::debug!(?result, ?address, "evonode status"); assert!(result.is_err()); } diff --git a/packages/rs-sdk/tests/vectors/test_evonode_status_refused/msg_EvoNode_6db392ff1869b56ecc7de9ace5864123671ed14d3f0c537aa8e878d24e529de5.json b/packages/rs-sdk/tests/vectors/test_evonode_status/msg_EvoNode_dae36baf8dec4f117f97a27099eb28ff908ec0406a4ac48fff5727a9b9a4ee57.json similarity index 58% rename from packages/rs-sdk/tests/vectors/test_evonode_status_refused/msg_EvoNode_6db392ff1869b56ecc7de9ace5864123671ed14d3f0c537aa8e878d24e529de5.json rename to packages/rs-sdk/tests/vectors/test_evonode_status/msg_EvoNode_dae36baf8dec4f117f97a27099eb28ff908ec0406a4ac48fff5727a9b9a4ee57.json index c80da24adb..c2bdd96612 100644 Binary files 
a/packages/rs-sdk/tests/vectors/test_evonode_status_refused/msg_EvoNode_6db392ff1869b56ecc7de9ace5864123671ed14d3f0c537aa8e878d24e529de5.json and b/packages/rs-sdk/tests/vectors/test_evonode_status/msg_EvoNode_dae36baf8dec4f117f97a27099eb28ff908ec0406a4ac48fff5727a9b9a4ee57.json differ diff --git a/packages/rs-sdk/tests/vectors/test_evonode_status/msg_EvoNode_fbdf15806b1160a9fb482d5663371cdde55f94897dcf9d905573b01fe445fbc9.json b/packages/rs-sdk/tests/vectors/test_evonode_status/msg_EvoNode_fbdf15806b1160a9fb482d5663371cdde55f94897dcf9d905573b01fe445fbc9.json deleted file mode 100644 index e51843cf30..0000000000 Binary files a/packages/rs-sdk/tests/vectors/test_evonode_status/msg_EvoNode_fbdf15806b1160a9fb482d5663371cdde55f94897dcf9d905573b01fe445fbc9.json and /dev/null differ diff --git a/packages/rs-sdk/tests/vectors/test_evonode_status_refused/msg_EvoNode_7a8ca78c81edf0322718e172f59dab90acb35dbe92b5072c67ae42b121a30dae.json b/packages/rs-sdk/tests/vectors/test_evonode_status_refused/msg_EvoNode_7a8ca78c81edf0322718e172f59dab90acb35dbe92b5072c67ae42b121a30dae.json new file mode 100644 index 0000000000..a72158ecd4 Binary files /dev/null and b/packages/rs-sdk/tests/vectors/test_evonode_status_refused/msg_EvoNode_7a8ca78c81edf0322718e172f59dab90acb35dbe92b5072c67ae42b121a30dae.json differ diff --git a/scripts/configure_test_suite_network.sh b/scripts/configure_test_suite_network.sh index 54e6f99349..498e9d2d03 100755 --- a/scripts/configure_test_suite_network.sh +++ b/scripts/configure_test_suite_network.sh @@ -66,7 +66,7 @@ else CERT_FLAG="" ST_EXECUTION_INTERVAL=15000 fi -SKIP_SYNC_BEFORE_HEIGHT=$(curl -s $INSIGHT_URL | jq '.height - 200') +SKIP_SYNC_BEFORE_HEIGHT=4800 # $(curl -s $INSIGHT_URL | jq '.height - 200') # check variables are not empty if [ -z "$FAUCET_ADDRESS" ] || \ diff --git a/yarn.lock b/yarn.lock index e9661201c4..0119fa898d 100644 --- a/yarn.lock +++ b/yarn.lock @@ -11386,21 +11386,12 @@ __metadata: languageName: node linkType: hard 
-"nanoid@npm:3.3.1": - version: 3.3.1 - resolution: "nanoid@npm:3.3.1" +"nanoid@npm:^3.3.8": + version: 3.3.8 + resolution: "nanoid@npm:3.3.8" bin: nanoid: bin/nanoid.cjs - checksum: 306f2cb9e4dcfb94738b09de9dc63839a37db33626f66b24dbcc8f66d4b91784645794a7c4f250d629e4d66f5385164c6748c58ac5b7c95217e9e048590efbe4 - languageName: node - linkType: hard - -"nanoid@npm:3.3.3": - version: 3.3.3 - resolution: "nanoid@npm:3.3.3" - bin: - nanoid: bin/nanoid.cjs - checksum: c703ed58a234b68245a8a4826dd25c1453a9017d34fa28bc58e7aa8247de87d854582fa2209d7aee04084cff9ce150be8fd30300abe567dc615d4e8e735f2d99 + checksum: 2d1766606cf0d6f47b6f0fdab91761bb81609b2e3d367027aff45e6ee7006f660fb7e7781f4a34799fe6734f1268eeed2e37a5fdee809ade0c2d4eb11b0f9c40 languageName: node linkType: hard