|
1 | | -/// Example how to limit blob requests by hash and node id, and to add |
2 | | -/// throttling or limiting the maximum number of connections. |
| 1 | +/// Example of how to use compression with iroh-blobs |
3 | 2 | /// |
4 | | -/// Limiting is done via a fn that returns an EventSender and internally |
5 | | -/// makes liberal use of spawn to spawn background tasks. |
6 | | -/// |
7 | | -/// This is fine, since the tasks will terminate as soon as the [BlobsProtocol] |
8 | | -/// instance holding the [EventSender] will be dropped. But for production |
9 | | -/// grade code you might nevertheless put the tasks into a [tokio::task::JoinSet] or |
10 | | -/// [n0_future::FuturesUnordered]. |
| 3 | +/// We create a derived protocol that compresses both requests and responses using lz4 |
| 4 | +/// or any other compression algorithm supported by async-compression. |
11 | 5 | mod common; |
12 | 6 | use std::{fmt::Debug, path::PathBuf}; |
13 | 7 |
|
@@ -211,14 +205,14 @@ async fn main() -> Result<()> { |
211 | 205 | Args::Get { ticket, target } => { |
212 | 206 | let store = MemStore::new(); |
213 | 207 | let conn = endpoint |
214 | | - .connect(ticket.node_addr().clone(), &lz4::Compression::ALPN) |
| 208 | + .connect(ticket.node_addr().clone(), lz4::Compression::ALPN) |
215 | 209 | .await?; |
216 | 210 | let connection_id = conn.stable_id() as u64; |
217 | 211 | let (send, recv) = conn.open_bi().await?; |
218 | 212 | let send = compression.send_stream(send); |
219 | 213 | let recv = compression.recv_stream(recv); |
220 | 214 | let sp = StreamPair::new(connection_id, recv, send); |
221 | | - let stats = store.remote().fetch(sp, ticket.hash_and_format()).await?; |
| 215 | + let _stats = store.remote().fetch(sp, ticket.hash_and_format()).await?; |
222 | 216 | if let Some(target) = target { |
223 | 217 | let size = store.export(ticket.hash(), &target).await?; |
224 | 218 | println!("Wrote {} bytes to {}", size, target.display()); |
|
0 commit comments