Upstream 0.15.1 (#58)
* Support binary image in snake case

* Fix u8 overflow in BagOfCells::serialize()

* Support string in tvm_success

* Improve LatestContractTransactionsCache performance

* Update readme
---------

Co-authored-by: Dmitrii Korchagin <[email protected]>
dbaranovstonfi and Dmitrii Korchagin authored May 29, 2024
1 parent 18fd602 commit 5949f65
Showing 17 changed files with 329 additions and 174 deletions.
2 changes: 1 addition & 1 deletion Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "tonlib"
version = "0.15.0"
version = "0.15.1"
edition = "2021"
description = "Rust SDK for The Open Network"
license = "MIT"
11 changes: 9 additions & 2 deletions README.md
@@ -20,7 +20,14 @@ Rust SDK for [The Open Network](https://ton.org/)

`tonlib-sys` - https://github.com/ston-fi/tonlib-sys

For macOS, the following components must be preinstalled:
## Prerequisites

For Linux:
```shell
sudo apt install build-essential cmake libsodium-dev libsecp256k1-dev lz4 liblz4-dev
```

For macOS:
```shell
brew install --cask mactex
brew install readline secp256k1 ccache pkgconfig cmake libsodium
@@ -40,7 +47,7 @@ To use this library in your Rust application, add the following to your Cargo.toml

```toml
[dependencies]
tonlib = "0.14"
tonlib = "0.15"
```

Then, in your Rust code, you can import the library with:
16 changes: 10 additions & 6 deletions src/cell.rs
@@ -36,6 +36,8 @@ mod util;

pub type ArcCell = Arc<Cell>;

pub type SnakeFormattedDict = HashMap<[u8; 32], Vec<u8>>;

#[derive(PartialEq, Eq, Clone, Hash)]
pub struct Cell {
pub data: Vec<u8>,
@@ -180,7 +182,7 @@ impl Cell {
/// ``` tail#_ {bn:#} b:(bits bn) = SnakeData ~0; ```
///
/// ``` cons#_ {bn:#} {n:#} b:(bits bn) next:^(SnakeData ~n) = SnakeData ~(n + 1); ```
pub fn load_snake_formatted_dict(&self) -> Result<HashMap<[u8; 32], Vec<u8>>, TonCellError> {
pub fn load_snake_formatted_dict(&self) -> Result<SnakeFormattedDict, TonCellError> {
let dict_loader = GenericDictLoader::new(
key_extractor_256bit,
value_extractor_snake_formatted_string,
@@ -221,12 +223,14 @@ impl Cell {
let mut first_cell = true;
loop {
let mut parser = cell.parser();
let first_byte = parser.load_uint(8)?.to_u32().unwrap();
if first_cell {
let first_byte = parser.load_u8(8)?;

if first_cell && first_byte != 0 {
return Err(TonCellError::boc_deserialization_error(
"Invalid snake format",
));
if first_byte != 0 {
return Err(TonCellError::boc_deserialization_error(
"Invalid snake format",
));
}
}
let remaining_bytes = parser.remaining_bytes();
let mut data = parser.load_bytes(remaining_bytes)?;
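For orientation, a minimal sketch of loading such a dictionary through the new alias (a sketch, assuming the crate's `BagOfCells::parse` / `single_root` API; the BoC bytes come from elsewhere):

```rust
use tonlib::cell::{BagOfCells, SnakeFormattedDict, TonCellError};

fn dump_snake_dict(boc_bytes: &[u8]) -> Result<(), TonCellError> {
    let boc = BagOfCells::parse(boc_bytes)?;
    let root = boc.single_root()?;
    // Values are raw bytes; decoding (e.g. to UTF-8) is left to the caller.
    let dict: SnakeFormattedDict = root.load_snake_formatted_dict()?;
    for (key, value) in &dict {
        println!("{:02x?} => {}", key, String::from_utf8_lossy(value));
    }
    Ok(())
}
```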
30 changes: 26 additions & 4 deletions src/cell/raw.rs
@@ -252,16 +252,17 @@ fn write_raw_cell(
let padding_bits = cell.bit_len % 8;
let full_bytes = padding_bits == 0;
let data = cell.data.as_slice();
let data_len = (cell.bit_len + 7) / 8;
let d2 = data_len as u8 * 2 - if full_bytes { 0 } else { 1 }; //subtract 1 if the last byte is not full
let data_len_bytes = (cell.bit_len + 7) / 8;
// data_len_bytes <= 128 by spec, but d2 must be u8 by spec as well
let d2 = (data_len_bytes * 2 - if full_bytes { 0 } else { 1 }) as u8; //subtract 1 if the last byte is not full

writer.write(8, d1).map_boc_serialization_error()?;
writer.write(8, d2).map_boc_serialization_error()?;
if !full_bytes {
writer
.write_bytes(&data[..data_len - 1])
.write_bytes(&data[..data_len_bytes - 1])
.map_boc_serialization_error()?;
let last_byte = data[data_len - 1];
let last_byte = data[data_len_bytes - 1];
let l = last_byte | 1 << (8 - padding_bits - 1);
writer.write(8, l).map_boc_serialization_error()?;
} else {
@@ -293,3 +294,24 @@ fn read_var_size(
}
Ok(result)
}

#[cfg(test)]
mod tests {
use super::*;
use tokio_test::assert_ok;

#[test]
fn test_raw_cell_serialize() {
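// 1023 bits round up to 128 data bytes: the old `data_len as u8 * 2` computation overflowed u8 here.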
let raw_cell = RawCell {
data: vec![1; 128],
bit_len: 1023,
references: vec![],
max_level: 255,
};
let raw_bag = RawBagOfCells {
cells: vec![raw_cell],
roots: vec![0],
};
let _res = assert_ok!(raw_bag.serialize(false));
}
}
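Isolated, the overflow the new cast order avoids looks like this (a standalone sketch; values mirror the 1023-bit case exercised by the test above):

```rust
fn main() {
    let bit_len: usize = 1023;
    let data_len_bytes = (bit_len + 7) / 8; // 128
    let full_bytes = bit_len % 8 == 0; // false: the last byte is not full

    // Old order: `data_len_bytes as u8 * 2` evaluates 128u8 * 2, which exceeds
    // u8::MAX and panics in debug builds. Multiplying in usize first keeps the
    // intermediate value in range: 128 * 2 - 1 = 255 still fits in u8.
    let d2 = (data_len_bytes * 2 - if full_bytes { 0 } else { 1 }) as u8;
    assert_eq!(d2, 255);
}
```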
8 changes: 1 addition & 7 deletions src/contract/jetton/master_contract.rs
Original file line number Diff line number Diff line change
@@ -94,13 +94,7 @@ fn read_jetton_metadata_content(cell: ArcCell) -> Result<MetaDataContent, TonCellError>
match content_representation {
0 => {
let dict = cell.reference(0)?.load_snake_formatted_dict()?;
let converted_dict = dict
.into_iter()
.map(|(key, value)| (key, String::from_utf8_lossy(&value).to_string()))
.collect();
Ok(MetaDataContent::Internal {
dict: converted_dict,
}) //todo #79
Ok(MetaDataContent::Internal { dict })
}
1 => {
let remaining_bytes = parser.remaining_bytes();
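The same eager `String` conversion is also dropped from `collection_contract.rs` and `item_contract.rs` below; `MetaDataContent::Internal` now carries raw bytes. A hypothetical caller-side helper (not part of the crate) showing where the decoding moves:

```rust
use tonlib::cell::SnakeFormattedDict;

// Hypothetical helper: decode a metadata value on demand rather than eagerly.
fn metadata_value_as_string(dict: &SnakeFormattedDict, key: &[u8; 32]) -> Option<String> {
    dict.get(key)
        .map(|bytes| String::from_utf8_lossy(bytes).to_string())
}
```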
129 changes: 82 additions & 47 deletions src/contract/latest_transactions_cache.rs
@@ -1,5 +1,4 @@
use std::collections::LinkedList;
use std::ops::DerefMut;
use std::sync::Arc;

use tokio::sync::Mutex;
@@ -13,87 +12,101 @@ pub struct LatestContractTransactionsCache {
capacity: usize,
contract_factory: TonContractFactory,
address: TonAddress,

soft_limit: bool,
inner: Mutex<Inner>,
}

struct Inner {
transactions: LinkedList<Arc<RawTransaction>>,
}

impl LatestContractTransactionsCache {
pub fn new(
contract_factory: &TonContractFactory,
contract_address: &TonAddress,
address: &TonAddress,
capacity: usize,
soft_limit: bool,
) -> LatestContractTransactionsCache {
let inner = Mutex::new(Inner {
transactions: LinkedList::new(),
});
LatestContractTransactionsCache {
capacity,
contract_factory: contract_factory.clone(),
address: contract_address.clone(),
address: address.clone(),

soft_limit,
inner: Mutex::new(Inner {
transactions: LinkedList::new(),
}),
inner,
}
}

/// Returns up to `limit` last transactions.
///
/// Returned transactions are sorted from latest to earliest.
pub async fn get(&self, limit: usize) -> Result<Vec<Arc<RawTransaction>>, TonContractError> {
if limit > self.capacity {
return Err(TonContractError::IllegalArgument(format!(
"Transactions cache size requested ({}) must not exceed cache capacity ({})",
limit, self.capacity
)));
}
let mut lock = self.inner.lock().await;
self.sync(lock.deref_mut()).await?;

let mut res = Vec::with_capacity(limit);
for i in lock.transactions.iter().take(limit) {
res.push(i.clone())
let target_sync_tx_id = self.get_latest_tx_id().await?;

let mut inner = self.inner.lock().await;

// check sync status
if inner.is_not_synced_to_tx_id(&target_sync_tx_id) {
inner
.load_new_txs(
&self.contract_factory,
&self.address,
self.soft_limit,
self.capacity,
&target_sync_tx_id,
)
.await?;
}
Ok(res)
let r = inner.fill_txs(limit);
Ok(r)
}

/// Returns up to `capacity` last transactions.
///
/// Returned transactions are sorted from latest to earliest.
pub async fn get_all(&self) -> Result<Vec<Arc<RawTransaction>>, TonContractError> {
self.get(self.capacity).await
}

async fn sync(&self, inner: &mut Inner) -> Result<(), TonContractError> {
// Find out what to sync
async fn get_latest_tx_id(&self) -> Result<InternalTransactionId, TonContractError> {
let state = self
.contract_factory
.get_latest_account_state(&self.address)
.await?;
let last_tx_id = &state.last_transaction_id;
let tx_id = state.last_transaction_id.clone();
Ok(tx_id)
}
}

let synced_tx_id: &InternalTransactionId = inner
.transactions
.front()
.map(|tx| &tx.transaction_id)
.unwrap_or(&NULL_TRANSACTION_ID);
struct Inner {
transactions: LinkedList<Arc<RawTransaction>>,
}

// Load necessary data
let mut loaded: Vec<Arc<RawTransaction>> = Vec::new();
impl Inner {
async fn load_new_txs(
&mut self,
contract_factory: &TonContractFactory,
address: &TonAddress,
soft_limit: bool,
capacity: usize,
target_sync_tx: &InternalTransactionId,
) -> Result<(), TonContractError> {
let synced_tx_id = self.get_latest_synced_tx_id();
let mut loaded = Vec::new();
let mut finished = false;
let mut next_to_load: InternalTransactionId = last_tx_id.clone();
let mut batch_size: usize = 16;
let mut next_to_load = target_sync_tx.clone();
let mut batch_size = 16;

while !finished && next_to_load.lt != 0 && next_to_load.lt > synced_tx_id.lt {
let maybe_txs = self
.contract_factory
let maybe_txs = contract_factory
.clone()
.client()
.get_raw_transactions_v2(&self.address, &next_to_load, batch_size, false)
.get_raw_transactions_v2(address, &next_to_load, batch_size, false)
.await;
let txs = match maybe_txs {
Ok(txs) => txs,
Err(e) if self.soft_limit => match e {
Err(e) if soft_limit => match e {
TonClientError::TonlibError { code: 500, .. } => {
batch_size /= 2;
if batch_size == 0 {
@@ -110,39 +123,61 @@ impl LatestContractTransactionsCache {
};

for tx in txs.transactions {
if loaded.len() >= self.capacity || tx.transaction_id.lt <= synced_tx_id.lt {
if loaded.len() >= capacity || tx.transaction_id.lt <= synced_tx_id.lt {
finished = true;
break;
}
loaded.push(Arc::new(tx));
}
next_to_load = txs.previous_transaction_id.clone();
}

// Add loaded transactions
if !loaded.is_empty() {
log::trace!(
"Adding {} new transactions for contract {}",
loaded.len(),
self.address
address
);
}
let txs = &mut self.transactions;
for tx in loaded.iter().rev() {
inner.transactions.push_front(tx.clone());
txs.push_front(tx.clone());
}

// Remove outdated transactions
if inner.transactions.len() > self.capacity {
if txs.len() > capacity {
log::trace!(
"Removing {} outdated transactions for contract {}",
inner.transactions.len() - self.capacity,
self.address
txs.len() - capacity,
address
);
}
while inner.transactions.len() > self.capacity {
inner.transactions.pop_back();
while txs.len() > capacity {
txs.pop_back();
}
log::trace!("Finished sync");

Ok(())
}

fn fill_txs(&self, limit: usize) -> Vec<Arc<RawTransaction>> {
let mut res = Vec::with_capacity(limit);
let txs = &self.transactions;
for i in txs.iter().take(limit) {
res.push(i.clone())
}
res
}

fn get_latest_synced_tx_id(&self) -> &InternalTransactionId {
self.transactions
.front()
.map(|tx| &tx.transaction_id)
.unwrap_or(&NULL_TRANSACTION_ID)
}

fn is_not_synced_to_tx_id(&self, target_sync_tx: &InternalTransactionId) -> bool {
let latest_synced_tx = self.get_latest_synced_tx_id();
latest_synced_tx != target_sync_tx
}
}
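A usage sketch of the reworked cache (constructor arguments per the new signature above; module paths and the `factory`/`address` setup are assumptions):

```rust
use tonlib::address::TonAddress;
use tonlib::contract::{LatestContractTransactionsCache, TonContractError, TonContractFactory};

async fn show_latest(
    factory: &TonContractFactory,
    address: &TonAddress,
) -> Result<(), TonContractError> {
    // capacity = 100; soft_limit = true halves the batch size on tonlib error 500
    // instead of failing the whole sync.
    let cache = LatestContractTransactionsCache::new(factory, address, 100, true);

    // The first call syncs from the network; later calls only load transactions
    // newer than the latest one already cached.
    for tx in cache.get(10).await? {
        println!("lt = {}", tx.transaction_id.lt);
    }
    Ok(())
}
```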
8 changes: 1 addition & 7 deletions src/contract/nft/collection_contract.rs
@@ -108,13 +108,7 @@ async fn read_collection_metadata_content(
let dict = reference
.load_snake_formatted_dict()
.map_cell_error("get_collection_data", collection_address)?;
let converted_dict = dict
.into_iter()
.map(|(key, value)| (key, String::from_utf8_lossy(&value).to_string()))
.collect();
Ok(MetaDataContent::Internal {
dict: converted_dict,
}) //todo #79
Ok(MetaDataContent::Internal { dict })
}
// On-chain content layout
// The first byte is 0x00 and the rest is key/value dictionary.
8 changes: 1 addition & 7 deletions src/contract/nft/item_contract.rs
@@ -141,13 +141,7 @@ async fn read_item_metadata_content(
let dict = reference
.load_snake_formatted_dict()
.map_cell_error("get_nft_data", item_address)?;
let converted_dict = dict
.into_iter()
.map(|(key, value)| (key, String::from_utf8_lossy(&value).to_string()))
.collect();
Ok(MetaDataContent::Internal {
dict: converted_dict,
}) //todo #79s
Ok(MetaDataContent::Internal { dict })
}
// Off-chain content layout
// The first byte is 0x01 and the rest is the URI pointing to the JSON document containing the token metadata.