Compare commits: cd371ac580...0762052c98 (12 commits)

- 0762052c98
- 25832b05de
- 0372b95111
- 9377a42099
- 6ea6232e9c
- 73d009a952
- bdd100120a
- 81d3d1cb0a
- e9c8ab98bb
- 76f16a1d98
- 5e6315ca7c
- f474d50414
Cargo.toml

```diff
@@ -9,6 +9,7 @@ crate-type = ["lib", "cdylib"]

 [dependencies]
 anyhow = "1.0"
+async-trait = "0.1"
 serde = { version = "1.0.188", features = ["derive"] }
 serde_json = "1.0"
 wasm-bindgen = "0.2.91"
@@ -19,6 +20,9 @@ tsify = { git = "https://github.com/Sosthene00/tsify", branch = "next" }
 # sdk_common = { path = "../sdk_common" }
 sdk_common = { git = "https://git.4nkweb.com/4nk/sdk_common.git", branch = "dev" }
 serde-wasm-bindgen = "0.6.5"
 wasm-bindgen-futures = "0.4"
+futures = "0.3"
+web-sys = { version = "0.3", features = ["Window"] }
+
 [dev-dependencies]
 wasm-bindgen-test = "0.3"
```
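The added `futures` and `web-sys` dependencies back the streaming block scanner introduced in `src/wallet.rs` below, which consumes a `Pin<Box<dyn Stream<Item = Result<BlockData>>>>`. A minimal sketch of that consumption pattern with `futures` 0.3 (names here are illustrative, not part of this diff):

```rust
use std::pin::Pin;
use futures::{stream, Stream, StreamExt};

// Drain a pinned, boxed stream item by item, as process_blocks does below.
async fn drain(mut blocks: Pin<Box<dyn Stream<Item = u32>>>) -> u32 {
    let mut last = 0;
    while let Some(height) = blocks.next().await {
        last = height; // handle each item as it arrives
    }
    last
}

// Usage: drain(Box::pin(stream::iter(1u32..=3))).await == 3
```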
src/api.rs (179 changed lines)
```diff
@@ -21,10 +21,10 @@ use anyhow::Error as AnyhowError;
 use anyhow::Result as AnyhowResult;
 use sdk_common::aes_gcm::aead::{Aead, Payload};
 use sdk_common::crypto::{
-    decrypt_with_key, encrypt_with_key, generate_key, AeadCore, Aes256Gcm, AnkSharedSecretHash, KeyInit, AAD
+    decrypt_with_key, encrypt_with_key, generate_key, verify_merkle_proof, AeadCore, Aes256Gcm, AnkSharedSecretHash, KeyInit, AAD
 };
 use sdk_common::process::{check_tx_for_process_updates, lock_processes, Process, ProcessState};
-use sdk_common::serialization::{OutPointMemberMap, OutPointProcessMap, ciborium_deserialize as common_deserialize};
+use sdk_common::serialization::{OutPointMemberMap, OutPointProcessMap};
 use sdk_common::signature::{AnkHash, AnkMessageHash, AnkValidationNoHash, AnkValidationYesHash, Proof};
 use sdk_common::sp_client::bitcoin::blockdata::fee_rate;
 use sdk_common::sp_client::bitcoin::consensus::{deserialize, serialize};
@@ -65,7 +65,7 @@ use sdk_common::network::{
     NewTxMessage,
 };
 use sdk_common::pcd::{
-    FileBlob, Member, Pcd, PcdCommitments, RoleDefinition, Roles, ValidationRule
+    DataType, FileBlob, Member, Pcd, PcdCommitments, RoleDefinition, Roles, ValidationRule, PCD_VERSION, PcdSerializable
 };
 use sdk_common::prd::{AnkPrdHash, Prd, PrdType};
 use sdk_common::silentpayments::{create_transaction as internal_create_transaction, sign_transaction as internal_sign_tx, TsUnsignedTransaction};
@@ -73,7 +73,7 @@ use sdk_common::sp_client::{FeeRate, OutputSpendStatus, OwnedOutput, Recipient,
 use sdk_common::secrets::SecretsStore;

 use crate::user::{lock_local_device, set_new_device, LOCAL_DEVICE};
-use crate::wallet::{generate_sp_wallet, lock_freezed_utxos};
+use crate::wallet::{generate_sp_wallet, lock_freezed_utxos, scan_blocks};

 const EMPTYSTATEID: &str = "0000000000000000000000000000000000000000000000000000000000000000";

```
```diff
@@ -253,7 +253,6 @@ pub fn get_address() -> ApiResult<String> {
         .get_sp_client()
         .get_receiving_address()
         .to_string();
-    debug!("{}", address);
     Ok(address)
 }

@@ -266,8 +265,8 @@ pub fn get_member() -> ApiResult<Member> {
 }

 #[wasm_bindgen]
-pub fn restore_device(device_str: String) -> ApiResult<()> {
-    let device: Device = serde_json::from_str(&device_str)?;
+pub fn restore_device(device: JsValue) -> ApiResult<()> {
+    let device: Device = serde_wasm_bindgen::from_value(device)?;

     let mut local_device = lock_local_device()?;

```
```diff
@@ -434,10 +433,10 @@ pub fn get_pairing_process_id() -> ApiResult<String> {
 }

 #[wasm_bindgen]
-pub fn dump_device() -> ApiResult<String> {
+pub fn dump_device() -> ApiResult<Device> {
     let local_device = lock_local_device()?;

-    Ok(serde_json::to_string(&local_device.clone())?)
+    Ok(local_device.clone())
 }

 #[wasm_bindgen]
```
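`restore_device` now takes a `JsValue` and `dump_device` returns the `Device` itself instead of JSON strings, so values cross the wasm boundary as structured objects (presumably via the existing `tsify`/`serde-wasm-bindgen` dependencies). A minimal sketch of the `serde_wasm_bindgen` round trip, with a hypothetical `DeviceLike` standing in for the real `Device`:

```rust
use serde::{Deserialize, Serialize};
use wasm_bindgen::JsValue;

#[derive(Serialize, Deserialize)]
struct DeviceLike {
    label: String,
}

// Struct -> JsValue, the direction dump_device now uses.
fn to_js(d: &DeviceLike) -> Result<JsValue, serde_wasm_bindgen::Error> {
    serde_wasm_bindgen::to_value(d)
}

// JsValue -> struct, the direction restore_device now uses.
fn from_js(v: JsValue) -> Result<DeviceLike, serde_wasm_bindgen::Error> {
    serde_wasm_bindgen::from_value(v)
}
```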
```diff
@@ -1115,7 +1114,7 @@ pub fn create_transaction(addresses: Vec<String>, fee_rate: u32) -> ApiResult<Ap
     // We mark the utxos in the inputs as spent to prevent accidental double spends
     for input in &unsigned_tx.unsigned_tx.as_ref().unwrap().input {
         if let Some(output) = outputs.get_mut(&input.previous_output) {
-            output.spend_status = OutputSpendStatus::Spent(new_txid.to_string());
+            output.spend_status = OutputSpendStatus::Spent(new_txid.to_byte_array());
         }
     }

```
```diff
@@ -1257,18 +1256,19 @@ pub fn update_process(
         roles.clone()
     )?;

-    // We compare the new state with the previous one
-    let last_state_merkle_root = &prev_state.state_id;
+    // TODO duplicate check is broken
+    // // We compare the new state with the previous one
+    // let last_state_merkle_root = &prev_state.state_id;

-    if *last_state_merkle_root == new_state.state_id {
-        return Err(ApiError::new("new proposed state is identical to the previous commited state".to_owned()));
-    }
+    // if *last_state_merkle_root == new_state.state_id {
+    //     return Err(ApiError::new("new proposed state is identical to the previous commited state".to_owned()));
+    // }

-    // We check that we don't have already a similar concurrent state
-    let concurrent_processes = process.get_latest_concurrent_states()?;
-    if concurrent_processes.iter().any(|p| p.state_id == new_state.state_id) {
-        return Err(ApiError::new("New state already known".to_owned()));
-    }
+    // // We check that we don't have already a similar concurrent state
+    // let concurrent_processes = process.get_latest_concurrent_states()?;
+    // if concurrent_processes.iter().any(|p| p.state_id == new_state.state_id) {
+    //     return Err(ApiError::new("New state already known".to_owned()));
+    // }

     let diffs = create_diffs(&lock_local_device()?, &process, &new_state, &members_list)?;

```
```diff
@@ -1279,9 +1279,8 @@ pub fn update_process(
     for (field, plain_value) in new_attributes.iter() {
         let hash = new_state.pcd_commitment.get(field).ok_or(anyhow::Error::msg("Missing commitment"))?;
         let key = generate_key(&mut rng);
-        let cipher = encrypt_with_key(&key, plain_value.as_slice())?;
         new_state.keys.insert(field.to_owned(), key);
+        let serialized = serde_json::to_string(plain_value)?;
+        let cipher = encrypt_with_key(&key, serialized.as_bytes())?;
         encrypted_data.insert(hash.to_lower_hex_string(), cipher.to_lower_hex_string());
     }

```
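The hunk above changes per-field encryption to serialize each value as JSON before encrypting it under a freshly generated key; the ciphertext is indexed by the field's commitment hash and the key is kept in `new_state.keys`. `encrypt_with_key` lives in `sdk_common::crypto`; a rough sketch of the AES-256-GCM flow it presumably wraps (the real nonce handling and wire format in `sdk_common` may differ):

```rust
use aes_gcm::{
    aead::{Aead, AeadCore, KeyInit, OsRng},
    Aes256Gcm, Key,
};

// Encrypt one serialized field under a fresh 256-bit key, prepending the
// 96-bit nonce so the key holder can decrypt. Illustrative format only.
fn encrypt_field(key_bytes: &[u8; 32], plaintext: &[u8]) -> Result<Vec<u8>, aes_gcm::Error> {
    let cipher = Aes256Gcm::new(Key::<Aes256Gcm>::from_slice(key_bytes));
    let nonce = Aes256Gcm::generate_nonce(&mut OsRng);
    let ciphertext = cipher.encrypt(&nonce, plaintext)?;
    let mut out = nonce.to_vec();
    out.extend_from_slice(&ciphertext);
    Ok(out)
}
```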
```diff
@@ -1451,7 +1450,8 @@ pub fn create_update_message(
     }

     // We shouldn't ever have error here since we already checked above
-    let shared_secret = shared_secrets.get_secret_for_address(sp_address.as_str().try_into()?).unwrap();
+    let shared_secret = shared_secrets.get_secret_for_address(sp_address.as_str().try_into()?)
+        .ok_or(AnyhowError::msg("Failed to retrieve secret".to_owned()))?;

     let cipher = encrypt_with_key(shared_secret.as_byte_array(), prd_msg.as_bytes())?;
     ciphers.push(cipher.to_lower_hex_string());
```
```diff
@@ -1709,30 +1709,133 @@ pub fn encode_json(json_data: JsValue) -> ApiResult<Pcd> {
     Ok(res)
 }

 #[wasm_bindgen]
 pub fn encode_value(value: JsValue) -> ApiResult<Vec<u8>> {
     let res = sdk_common::serialization::ciborium_serialize(&serde_wasm_bindgen::from_value(value)?)?;
     Ok(res)
 }

 #[wasm_bindgen]
 pub fn decode_value(value: Vec<u8>) -> ApiResult<JsValue> {
-    if let Ok(deserialized) = sdk_common::serialization::ciborium_deserialize::<FileBlob>(&value) {
-        let u8IntArray = Uint8Array::from(deserialized.data.as_slice());
+    // Try FileBlob first
+    if let Ok(file_blob) = sdk_common::pcd::FileBlob::deserialize_from_pcd(&value) {
+        let u8IntArray = Uint8Array::from(file_blob.data.as_slice());
         let res = Object::new();
-        Reflect::set(&res, &JsValue::from_str("type"), &JsValue::from_str(&deserialized.r#type));
-        Reflect::set(&res, &JsValue::from_str("data"), &JsValue::from(u8IntArray));
-        Ok(JsValue::from(res))
-    } else {
-        let res: Value = sdk_common::serialization::ciborium_deserialize(&value)?;
-        return Ok(serde_wasm_bindgen::to_value(&res)?);
+        Reflect::set(&res, &JsValue::from_str("type"), &JsValue::from_str(&file_blob.r#type)).unwrap();
+        Reflect::set(&res, &JsValue::from_str("data"), &JsValue::from(u8IntArray)).unwrap();
+        return Ok(JsValue::from(res));
     }
+    // Try JSON next
+    if let Ok(json) = serde_json::Value::deserialize_from_pcd(&value) {
+        return Ok(serde_wasm_bindgen::to_value(&json)?);
+    }
+    Err(ApiError::new("Invalid or unsupported PCD data".to_owned()))
 }

 #[wasm_bindgen]
 pub fn hash_value(value: JsValue, commited_in: String, label: String) -> ApiResult<String> {
     let outpoint = OutPoint::from_str(&commited_in)?;
-    let encoded_value = sdk_common::serialization::ciborium_serialize(&serde_wasm_bindgen::from_value::<FileBlob>(value)?)?;
+    let encoded_value = if let Ok(file_blob) = serde_wasm_bindgen::from_value::<FileBlob>(value.clone()) {
+        file_blob.serialize_to_pcd()?
+    } else if let Ok(json) = serde_wasm_bindgen::from_value::<Value>(value) {
+        json.serialize_to_pcd()?
+    } else {
+        return Err(ApiError::new("Invalid or unsupported PCD data".to_owned()));
+    };
     let hash = AnkPcdHash::from_pcd_value(encoded_value.as_slice(), label.as_bytes(), &outpoint);
     Ok(hash.as_byte_array().to_lower_hex_string())
 }

+#[derive(Tsify, Serialize, Deserialize)]
+#[tsify(into_wasm_abi, from_wasm_abi)]
+#[allow(non_camel_case_types)]
+pub struct MerkleProofResult {
+    pub proof: String,
+    pub root: String,
+    pub attribute: String,
+    pub attribute_index: usize,
+    pub total_leaves_count: usize,
+}
+
+#[wasm_bindgen]
+/// Generate a merkle proof for a specific attribute in a process state.
+///
+/// This function creates a merkle proof that proves the existence of a specific attribute
+/// in a given state of a process. The proof can be used to verify that the attribute
+/// was indeed part of the state without revealing the entire state.
+///
+/// # Arguments
+/// * `process_state` - The process state object as a JavaScript value
+/// * `attribute_name` - The name of the attribute to generate a proof for
+///
+/// # Returns
+/// A MerkleProofResult object containing:
+/// * `proof` - The merkle proof as a hex string
+/// * `root` - The merkle root (state_id) as a hex string
+/// * `attribute` - The attribute name that was proven
+/// * `attribute_index` - The index of the attribute in the merkle tree
+/// * `total_leaves_count` - The total number of leaves in the merkle tree
+///
+/// # Errors
+/// * "Failed to deserialize process state" - If the process state cannot be deserialized from JsValue
+/// * "Attribute not found in state" - If the attribute doesn't exist in the state
+pub fn get_merkle_proof(process_state: JsValue, attribute_name: String) -> ApiResult<MerkleProofResult> {
+    // Deserialize the process state from JsValue
+    let state: ProcessState = serde_wasm_bindgen::from_value(process_state)
+        .map_err(|_| ApiError::new("Failed to deserialize process state".to_owned()))?;
+
+    // Create merkle tree from the PCD commitments
+    let merkle_tree = state.pcd_commitment.create_merkle_tree()?;
+
+    // Find the index of the attribute in the commitments
+    let attribute_index = state.pcd_commitment.find_index_of(&attribute_name)
+        .ok_or(ApiError::new("Attribute not found in state".to_owned()))?;
+
+    // Generate the merkle proof for the attribute
+    let proof = merkle_tree.proof(&[attribute_index]);
+
+    // Convert the proof to a format that can be serialized to JavaScript
+    let proof_bytes = proof.to_bytes();
+    let proof_hex = proof_bytes.to_lower_hex_string();
+
+    Ok(MerkleProofResult {
+        proof: proof_hex,
+        root: state.state_id.to_lower_hex_string(),
+        attribute: attribute_name,
+        attribute_index: attribute_index,
+        total_leaves_count: merkle_tree.leaves_len(),
+    })
+}
+
```
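`get_merkle_proof` builds the tree from the state's PCD commitments and proves a single leaf; the `proof(&[index])`, `to_bytes()`, and `leaves_len()` calls match the `rs_merkle` crate's `MerkleTree` API, which `sdk_common` appears to wrap. A standalone sketch of the same proof generation, assuming SHA-256 leaf hashes (the actual hasher in `sdk_common` may differ):

```rust
use rs_merkle::{algorithms::Sha256, MerkleTree};

// Build a tree over 32-byte leaf hashes and prove the leaf at `index`,
// returning (proof bytes, root, total leaf count) like MerkleProofResult.
fn prove(leaves: &[[u8; 32]], index: usize) -> (Vec<u8>, [u8; 32], usize) {
    let tree = MerkleTree::<Sha256>::from_leaves(leaves);
    let proof = tree.proof(&[index]);
    let root = tree.root().expect("non-empty tree");
    (proof.to_bytes(), root, tree.leaves_len())
}
```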
```diff
+#[wasm_bindgen]
+/// Validate a merkle proof for a specific attribute.
+///
+/// This function verifies that a merkle proof is valid and proves the existence
+/// of a specific attribute in a given state. It checks that the proof correctly
+/// leads to the claimed root when combined with the attribute hash.
+///
+/// # Arguments
+/// * `proof_result` - a JsValue expected to contain a MerkleProofResult with the proof and metadata
+/// * `hash` - The hash of the attribute data as a hex string (the leaf value)
+///
+/// # Returns
+/// A boolean indicating whether the proof is valid
+///
+/// # Errors
+/// * "serde_wasm_bindgen deserialization error" - If the proof is not a valid MerkleProofResult
+/// * "Invalid proof format" - If the proof cannot be parsed
+/// * "Invalid hash format" - If the hash is not a valid 32-byte hex string
+/// * "Invalid root format" - If the root is not a valid 32-byte hex string
+pub fn validate_merkle_proof(proof_result: JsValue, hash: String) -> ApiResult<bool> {
+    let proof_result: MerkleProofResult = serde_wasm_bindgen::from_value(proof_result)?;
+    let root_bytes: [u8; 32] = Vec::from_hex(&proof_result.root)?
+        .try_into()
+        .map_err(|_| ApiError::new("Invalid root format".to_owned()))?;
+
+    let proof_bytes = Vec::from_hex(&proof_result.proof)
+        .map_err(|_| ApiError::new("Invalid proof format".to_owned()))?;
+
+    let index = proof_result.attribute_index;
+    let total_leaves_count = proof_result.total_leaves_count;
+
+    let hash_bytes: [u8; 32] = Vec::from_hex(&hash)?.try_into()
+        .map_err(|_| ApiError::new("Invalid hash format".to_owned()))?;
+
+    let res = verify_merkle_proof(&proof_bytes, &root_bytes, index, &hash_bytes, total_leaves_count)?;
+
+    Ok(res)
+}
```
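`verify_merkle_proof` is the new import from `sdk_common::crypto` (see the first hunk of this file). Under the same `rs_merkle` assumption as above, verification reduces to roughly this sketch:

```rust
use rs_merkle::{algorithms::Sha256, MerkleProof};

// True iff `leaf` sits at `index` under `root` in a tree of `total` leaves.
fn verify(proof_bytes: &[u8], root: [u8; 32], index: usize, leaf: [u8; 32], total: usize) -> bool {
    match MerkleProof::<Sha256>::from_bytes(proof_bytes) {
        Ok(proof) => proof.verify(root, &[index], &[leaf], total),
        Err(_) => false,
    }
}
```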
src/wallet.rs (288 changed lines)
```diff
@@ -1,17 +1,17 @@
 use std::{
-    collections::HashSet,
-    sync::{Mutex, MutexGuard, OnceLock},
+    collections::{HashMap, HashSet}, ops::RangeInclusive, pin::Pin, sync::{atomic::AtomicBool, Arc, Mutex, MutexGuard, OnceLock}, time::Instant
 };

-use anyhow::Error;
+use anyhow::{bail, Result, Error};
+use futures::Stream;
 use rand::Rng;
-use sdk_common::sp_client::{
-    bitcoin::{secp256k1::SecretKey, Network, OutPoint},
-    silentpayments::SilentPaymentAddress,
-    SpClient, SpendKey,
-};
+use sdk_common::{log, sp_client::{
+    bitcoin::{absolute::Height, hashes::{sha256, Hash}, secp256k1::{PublicKey, SecretKey}, Amount, BlockHash, Network, OutPoint}, silentpayments::SilentPaymentAddress, BlindbitBackend, BlockData, FilterData, OutputSpendStatus, OwnedOutput, SpClient, SpScanner, SpendKey, SpentIndexData, Updater, UtxoData
+}, updates::StateUpdater};

-use crate::MutexExt;
+use sdk_common::sp_client::ChainBackendWasm;
+
+use crate::{user::lock_local_device, MutexExt};

 pub static FREEZED_UTXOS: OnceLock<Mutex<HashSet<OutPoint>>> = OnceLock::new();

```
```diff
@@ -29,3 +29,273 @@ pub fn generate_sp_wallet(network: Network) -> anyhow::Result<SpClient> {
         network,
     )
 }
+
+pub struct WasmSpScanner<'a> {
+    updater: Box<dyn Updater + Sync>,
+    backend: BlindbitBackend,
+    client: SpClient,
+    keep_scanning: &'a AtomicBool,
+    owned_outpoints: HashSet<OutPoint>,
+}
+
+impl<'a> WasmSpScanner<'a> {
+    pub fn new(
+        client: SpClient,
+        updater: Box<dyn Updater + Sync>,
+        backend: BlindbitBackend,
+        owned_outpoints: HashSet<OutPoint>,
+        keep_scanning: &'a AtomicBool,
+    ) -> Self {
+        Self {
+            client,
+            updater,
+            backend,
+            owned_outpoints,
+            keep_scanning,
+        }
+    }
+
+    pub async fn scan_blocks(
+        &mut self,
+        start: Height,
+        end: Height,
+        dust_limit: Amount,
+        with_cutthrough: bool,
+    ) -> Result<()> {
+        if start > end {
+            bail!("bigger start than end: {} > {}", start, end);
+        }
+
+        log::info!("start: {} end: {}", start, end);
+        let start_time: Instant = Instant::now();
+
+        // get block data stream
+        let range = start.to_consensus_u32()..=end.to_consensus_u32();
+        let block_data_stream = self.backend.get_block_data_for_range(range, dust_limit, with_cutthrough);
+
+        // process blocks using block data stream
+        self.process_blocks(start, end, block_data_stream).await?;
+
+        // time elapsed for the scan
+        log::info!(
+            "Blindbit scan complete in {} seconds",
+            start_time.elapsed().as_secs()
+        );
+
+        Ok(())
+    }
+
+    async fn process_blocks(
+        &mut self,
+        start: Height,
+        end: Height,
+        block_data_stream: Pin<Box<dyn Stream<Item = Result<BlockData>>>>,
+    ) -> Result<()> {
+        use sdk_common::sp_client::futures::StreamExt;
+        use std::time::{Duration, Instant};
+
+        let mut update_time = Instant::now();
+        let mut stream = block_data_stream;
+
+        while let Some(blockdata) = stream.next().await {
+            let blockdata = blockdata?;
+            let blkheight = blockdata.blkheight;
+            let blkhash = blockdata.blkhash;
+
+            // stop scanning and return if interrupted
+            if self.should_interrupt() {
+                self.save_state()?;
+                return Ok(());
+            }
+
+            let mut save_to_storage = false;
+
+            // always save on last block or after 30 seconds since last save
+            if blkheight == end || update_time.elapsed() > Duration::from_secs(30) {
+                save_to_storage = true;
+            }
+
+            let (found_outputs, found_inputs) = self.process_block(blockdata).await?;
+
+            if !found_outputs.is_empty() {
+                save_to_storage = true;
+                self.record_outputs(blkheight, blkhash, found_outputs)?;
+            }
+
+            if !found_inputs.is_empty() {
+                save_to_storage = true;
+                self.record_inputs(blkheight, blkhash, found_inputs)?;
+            }
+
+            // tell the updater we scanned this block
+            self.record_progress(start, blkheight, end)?;
+
+            if save_to_storage {
+                self.save_state()?;
+                update_time = Instant::now();
+            }
+        }
+
+        Ok(())
+    }
+
+    async fn process_block(
+        &mut self,
+        blockdata: BlockData,
+    ) -> Result<(HashMap<OutPoint, OwnedOutput>, HashSet<OutPoint>)> {
+        let BlockData {
+            blkheight,
+            tweaks,
+            new_utxo_filter,
+            spent_filter,
+            ..
+        } = blockdata;
+
+        let outs = self
+            .process_block_outputs(blkheight, tweaks, new_utxo_filter)
+            .await?;
+
+        // after processing outputs, we add the found outputs to our list
+        self.owned_outpoints.extend(outs.keys());
+
+        let ins = self.process_block_inputs(blkheight, spent_filter).await?;
+
+        // after processing inputs, we remove the found inputs
+        self.owned_outpoints.retain(|item| !ins.contains(item));
+
+        Ok((outs, ins))
+    }
+
+    async fn process_block_outputs(
+        &self,
+        blkheight: Height,
+        tweaks: Vec<PublicKey>,
+        new_utxo_filter: FilterData,
+    ) -> Result<HashMap<OutPoint, OwnedOutput>> {
+        // Implementation for processing block outputs
+        // This is a placeholder - you'll need to implement the actual logic
+        Ok(HashMap::new())
+    }
+
+    async fn process_block_inputs(
+        &self,
+        blkheight: Height,
+        spent_filter: FilterData,
+    ) -> Result<HashSet<OutPoint>> {
+        // Implementation for processing block inputs
+        // This is a placeholder - you'll need to implement the actual logic
+        Ok(HashSet::new())
+    }
+
+    fn should_interrupt(&self) -> bool {
+        !self
+            .keep_scanning
+            .load(std::sync::atomic::Ordering::Relaxed)
+    }
+
+    fn save_state(&mut self) -> Result<()> {
+        self.updater.save_to_persistent_storage()
+    }
+
+    fn record_outputs(
+        &mut self,
+        height: Height,
+        block_hash: BlockHash,
+        outputs: HashMap<OutPoint, OwnedOutput>,
+    ) -> Result<()> {
+        self.updater.record_block_outputs(height, block_hash, outputs)
+    }
+
+    fn record_inputs(
+        &mut self,
+        height: Height,
+        block_hash: BlockHash,
+        inputs: HashSet<OutPoint>,
+    ) -> Result<()> {
+        self.updater.record_block_inputs(height, block_hash, inputs)
+    }
+
+    fn record_progress(&mut self, start: Height, current: Height, end: Height) -> Result<()> {
+        self.updater.record_scan_progress(start, current, end)
+    }
+
+    // Override the default get_input_hashes implementation to use owned_outpoints
+    fn get_input_hashes(&self, blkhash: BlockHash) -> Result<HashMap<[u8; 8], OutPoint>> {
+        let mut map: HashMap<[u8; 8], OutPoint> = HashMap::new();
+
+        for outpoint in &self.owned_outpoints {
+            let mut arr = [0u8; 68];
+            arr[..32].copy_from_slice(&outpoint.txid.to_raw_hash().to_byte_array());
+            arr[32..36].copy_from_slice(&outpoint.vout.to_le_bytes());
+            arr[36..].copy_from_slice(&blkhash.to_byte_array());
+            let hash = sha256::Hash::hash(&arr);
+
+            let mut res = [0u8; 8];
+            res.copy_from_slice(&hash[..8]);
+
+            map.insert(res, outpoint.clone());
+        }
+
+        Ok(map)
+    }
+}
+
+pub async fn scan_blocks(mut n_blocks_to_scan: u32, blindbit_url: &str, tip_height: u32, with_cutthrough: bool) -> anyhow::Result<()> {
+    log::info!("Starting a rescan");
+
+    // Get all the data we need upfront, before any async operations
+    let device = lock_local_device()?;
+    let sp_wallet = device.get_sp_wallet();
+    let scan_height = sp_wallet.get_last_scan();
+
+    // 0 means scan to tip
+    if n_blocks_to_scan == 0 {
+        n_blocks_to_scan = tip_height - scan_height;
+    }
+
+    let start = scan_height + 1;
+    let end = if scan_height + n_blocks_to_scan <= tip_height {
+        scan_height + n_blocks_to_scan
+    } else {
+        tip_height
+    };
+
+    if start > end {
+        return Ok(());
+    }
+
+    let updater = StateUpdater::new();
+    let backend = BlindbitBackend::new(blindbit_url.to_string())?;
+
+    let owned_outpoints = sp_wallet.get_unspent_outputs().keys().map(|o| *o).collect();
+
+    let keep_scanning = Arc::new(AtomicBool::new(true));
+
+    log::info!("start: {} end: {}", start, end);
+    let start_time = Instant::now();
+    let mut scanner = WasmSpScanner::new(
+        sp_wallet.get_sp_client().clone(),
+        Box::new(updater),
+        backend,
+        owned_outpoints,
+        &keep_scanning,
+    );
+
+    let dust_limit = Amount::from_sat(0); // We don't really have a dust limit for this use case
+    scanner
+        .scan_blocks(
+            Height::from_consensus(start)?,
+            Height::from_consensus(end)?,
+            dust_limit,
+            with_cutthrough,
+        )
+        .await?;
+
+    // time elapsed for the scan
+    log::info!(
+        "Scan complete in {} seconds",
+        start_time.elapsed().as_secs()
+    );
+
+    Ok(())
+}
```
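`scan_blocks` walks from the wallet's last scanned height toward the tip, persisting scanner state whenever outputs or inputs are found, at least every 30 seconds, and on the final block. The `keep_scanning` flag is the cooperative cancellation hook: `should_interrupt` reads it on each block and exits early after saving state. As written the flag is created inside `scan_blocks`, so exposing cancellation to a caller would mean passing the `Arc` in; a sketch of that pattern (hypothetical caller, not part of this diff):

```rust
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};

// Flip the shared flag; WasmSpScanner::should_interrupt() then returns true
// and the scan saves its state and returns at the next block boundary.
fn request_stop(keep_scanning: &Arc<AtomicBool>) {
    keep_scanning.store(false, Ordering::Relaxed);
}
```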