From 89917d4bb665bd8336d4d8fcc147b2f643864d36 Mon Sep 17 00:00:00 2001
From: Slava <20563034+veaceslavdoina@users.noreply.github.com>
Date: Wed, 3 Jul 2024 20:20:53 +0300
Subject: [PATCH] Release v0.1.3 (#856)

---
 .github/workflows/nim-matrix.yml     |  2 +-
 Makefile                             |  2 +-
 codex/erasure/asyncbackend.nim       | 18 +++++-
 codex/erasure/erasure.nim            | 16 ++++-
 codex/node.nim                       |  9 +++
 codex/rest/api.nim                   | 18 +++++-
 tests/codex/testerasure.nim          | 19 ++++++
 tests/integration/codexclient.nim    |  4 +-
 tests/integration/testpurchasing.nim | 29 +++------
 tests/integration/testrestapi.nim    | 92 ++++++++++++++++++++++++++++
 10 files changed, 178 insertions(+), 31 deletions(-)

diff --git a/.github/workflows/nim-matrix.yml b/.github/workflows/nim-matrix.yml
index 0b57c8bfd..579ee6a50 100644
--- a/.github/workflows/nim-matrix.yml
+++ b/.github/workflows/nim-matrix.yml
@@ -6,7 +6,7 @@ on:
 
 env:
   cache_nonce: 0 # Allows for easily busting actions/cache caches
-  nim_version: pinned, v1.6.16, v1.6.18
+  nim_version: pinned
 
 jobs:
   matrix:
diff --git a/Makefile b/Makefile
index 2e1f97ad3..54ac04d2b 100644
--- a/Makefile
+++ b/Makefile
@@ -15,7 +15,7 @@
 #
 # If NIM_COMMIT is set to "nimbusbuild", this will use the
 # version pinned by nimbus-build-system.
-PINNED_NIM_VERSION := v1.6.14
+PINNED_NIM_VERSION := 38640664088251bbc88917b4bacfd86ec53014b8 # 1.6.21
 
 ifeq ($(NIM_COMMIT),)
 NIM_COMMIT := $(PINNED_NIM_VERSION)
diff --git a/codex/erasure/asyncbackend.nim b/codex/erasure/asyncbackend.nim
index f3bca851f..4827806ad 100644
--- a/codex/erasure/asyncbackend.nim
+++ b/codex/erasure/asyncbackend.nim
@@ -114,7 +114,14 @@ proc proxySpawnEncodeTask(
     args: EncodeTaskArgs,
     data: ref seq[seq[byte]]
 ): Flowvar[EncodeTaskResult] =
-  tp.spawn encodeTask(args, data[])
+  # FIXME Uncomment the code below after addressing an issue:
+  # https://github.com/codex-storage/nim-codex/issues/854
+
+  # tp.spawn encodeTask(args, data[])
+
+  let fv = EncodeTaskResult.newFlowVar
+  fv.readyWith(encodeTask(args, data[]))
+  return fv
 
 proc proxySpawnDecodeTask(
     tp: Taskpool,
@@ -122,7 +129,14 @@ proc proxySpawnDecodeTask(
     data: ref seq[seq[byte]],
     parity: ref seq[seq[byte]]
 ): Flowvar[DecodeTaskResult] =
-  tp.spawn decodeTask(args, data[], parity[])
+  # FIXME Uncomment the code below after addressing an issue:
+  # https://github.com/codex-storage/nim-codex/issues/854
+
+  # tp.spawn decodeTask(args, data[], parity[])
+
+  let fv = DecodeTaskResult.newFlowVar
+  fv.readyWith(decodeTask(args, data[], parity[]))
+  return fv
 
 proc awaitResult[T](signal: ThreadSignalPtr, handle: Flowvar[T]): Future[?!T] {.async.} =
   await wait(signal)
diff --git a/codex/erasure/erasure.nim b/codex/erasure/erasure.nim
index 0c921776f..56e3e1cf5 100644
--- a/codex/erasure/erasure.nim
+++ b/codex/erasure/erasure.nim
@@ -27,6 +27,7 @@ import ../blocktype as bt
 import ../utils
 import ../utils/asynciter
 import ../indexingstrategy
+import ../errors
 
 import pkg/stew/byteutils
 
@@ -82,6 +83,13 @@ type
     blocksCount: Natural
     strategy: StrategyType
 
+  ErasureError* = object of CodexError
+  InsufficientBlocksError* = object of ErasureError
+    # Minimum size, in bytes, that the dataset must have had
+    # for the encoding request to have succeeded with the parameters
+    # provided.
+    minSize*: NBytes
+
 func indexToPos(steps, idx, step: int): int {.inline.} =
   ## Convert an index to a position in the encoded
   ## dataset
@@ -236,11 +244,13 @@ proc init*(
   ecK: Natural, ecM: Natural,
   strategy: StrategyType): ?!EncodingParams =
   if ecK > manifest.blocksCount:
-    return failure(
-      "Unable to encode manifest, not enough blocks, ecK = " &
+    let exc = (ref InsufficientBlocksError)(
+      msg: "Unable to encode manifest, not enough blocks, ecK = " &
       $ecK &
       ", blocksCount = " &
-      $manifest.blocksCount)
+      $manifest.blocksCount,
+      minSize: ecK.NBytes * manifest.blockSize)
+    return failure(exc)
 
   let
     rounded = roundUp(manifest.blocksCount, ecK)
diff --git a/codex/node.nim b/codex/node.nim
index f2bcb2897..19065c99a 100644
--- a/codex/node.nim
+++ b/codex/node.nim
@@ -423,6 +423,15 @@ proc setupRequest(
     trace "Unable to fetch manifest for cid"
     return failure error
 
+  # ----------------------------------------------------------------------------
+  # FIXME this is a BAND-AID to address
+  # https://github.com/codex-storage/nim-codex/issues/852 temporarily for the
+  # workshop. Remove this once we get that fixed.
+  if manifest.blocksCount.uint == ecK:
+    return failure("Cannot setup slots for a dataset with ecK == numBlocks. Please use a larger file or a different combination of `nodes` and `tolerance`.")
+  # ----------------------------------------------------------------------------
+
+
   # Erasure code the dataset according to provided parameters
   let
     erasure = Erasure.new(
diff --git a/codex/rest/api.nim b/codex/rest/api.nim
index 6b258b228..d44cffa4e 100644
--- a/codex/rest/api.nim
+++ b/codex/rest/api.nim
@@ -32,6 +32,7 @@ import ../node
 import ../blocktype
 import ../conf
 import ../contracts
+import ../erasure/erasure
 import ../manifest
 import ../streams/asyncstreamwrapper
 import ../stores
@@ -432,8 +433,16 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
       let nodes = params.nodes |? 1
       let tolerance = params.tolerance |? 0
 
-      if (nodes - tolerance) < 1:
-        return RestApiResponse.error(Http400, "Tolerance cannot be greater or equal than nodes (nodes - tolerance)")
+      # prevent underflow
+      if tolerance > nodes:
+        return RestApiResponse.error(Http400, "Invalid parameters: `tolerance` cannot be greater than `nodes`")
+
+      let ecK = nodes - tolerance
+      let ecM = tolerance # for readability
+
+      # ensure leopard constraint of 1 < K ≥ M
+      if ecK <= 1 or ecK < ecM:
+        return RestApiResponse.error(Http400, "Invalid parameters: parameters must satisfy `1 < (nodes - tolerance) ≥ tolerance`")
 
       without expiry =? params.expiry:
         return RestApiResponse.error(Http400, "Expiry required")
@@ -451,6 +460,11 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
           params.collateral,
           expiry),
         error:
+        if error of InsufficientBlocksError:
+          return RestApiResponse.error(Http400,
+            "Dataset too small for erasure parameters, need at least " &
+            $(ref InsufficientBlocksError)(error).minSize.int & " bytes")
+
         return RestApiResponse.error(Http500, error.msg)
 
       return RestApiResponse.response(purchaseId.toHex)
diff --git a/tests/codex/testerasure.nim b/tests/codex/testerasure.nim
index 84f4b1c3c..018bb55a9 100644
--- a/tests/codex/testerasure.nim
+++ b/tests/codex/testerasure.nim
@@ -252,3 +252,22 @@ suite "Erasure encode/decode":
       decoded.treeCid == manifest.treeCid
       decoded.treeCid == verifiable.originalTreeCid
       decoded.blocksCount == verifiable.originalBlocksCount
+
+  for i in 1..5:
+    test "Should encode/decode using various parameters " & $i & "/5":
+      let
+        blockSize = rng.sample(@[1, 2, 4, 8, 16, 32, 64].mapIt(it.KiBs))
+        datasetSize = 1.MiBs
+        ecK = 10.Natural
+        ecM = 10.Natural
+
+      let
+        chunker = RandomChunker.new(rng, size = datasetSize, chunkSize = blockSize)
+        manifest = await storeDataGetManifest(store, chunker)
+        encoded = (await erasure.encode(manifest, ecK, ecM)).tryGet()
+        decoded = (await erasure.decode(encoded)).tryGet()
+
+      check:
+        decoded.treeCid == manifest.treeCid
+        decoded.treeCid == encoded.originalTreeCid
+        decoded.blocksCount == encoded.originalBlocksCount
\ No newline at end of file
diff --git a/tests/integration/codexclient.nim b/tests/integration/codexclient.nim
index bce1c27ec..d2d78b465 100644
--- a/tests/integration/codexclient.nim
+++ b/tests/integration/codexclient.nim
@@ -96,7 +96,7 @@ proc requestStorageRaw*(
     proofProbability: UInt256,
     collateral: UInt256,
     expiry: uint = 0,
-    nodes: uint = 1,
+    nodes: uint = 2,
     tolerance: uint = 0
 ): Response =
 
@@ -125,7 +125,7 @@ proc requestStorage*(
     proofProbability: UInt256,
     expiry: uint,
     collateral: UInt256,
-    nodes: uint = 1,
+    nodes: uint = 2,
     tolerance: uint = 0
 ): ?!PurchaseId =
   ## Call request storage REST endpoint
diff --git a/tests/integration/testpurchasing.nim b/tests/integration/testpurchasing.nim
index 8f5a5befc..bc87f51b3 100644
--- a/tests/integration/testpurchasing.nim
+++ b/tests/integration/testpurchasing.nim
@@ -8,7 +8,8 @@ import ../examples
 twonodessuite "Purchasing", debug1 = false, debug2 = false:
 
   test "node handles storage request":
-    let cid = client1.upload("some file contents").get
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
     let id1 = client1.requestStorage(cid, duration=100.u256, reward=2.u256, proofProbability=3.u256, expiry=10, collateral=200.u256).get
     let id2 = client1.requestStorage(cid, duration=400.u256, reward=5.u256, proofProbability=6.u256, expiry=10, collateral=201.u256).get
     check id1 != id2
@@ -26,7 +27,7 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
                                     proofProbability=3.u256,
                                     expiry=30,
                                     collateral=200.u256,
-                                    nodes=2,
+                                    nodes=3,
                                     tolerance=1).get
 
     let request = client1.getPurchase(id).get.request.get
@@ -35,7 +36,7 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
     check request.ask.proofProbability == 3.u256
     check request.expiry == 30
     check request.ask.collateral == 200.u256
-    check request.ask.slots == 2'u64
+    check request.ask.slots == 3'u64
     check request.ask.maxSlotLoss == 1'u64
 
   # TODO: We currently do not support encoding single chunks
   # test "node retrieves purchase status":
   #   let cid = client1.upload("some file contents").get
   #   let id = client1.requestStorage(cid,
   #                                   duration=1.u256,
   #                                   reward=2.u256,
   #                                   proofProbability=3.u256,
   #                                   expiry=30,
   #                                   collateral=200.u256,
   #                                   nodes=2,
   #                                   tolerance=1).get
   #
   #   let request = client1.getPurchase(id).get.request.get
   #   check request.ask.duration == 1.u256
   #   check request.ask.reward == 2.u256
   #   check request.ask.proofProbability == 3.u256
   #   check request.expiry == 30
   #   check request.ask.collateral == 200.u256
   #   check request.ask.slots == 2'u64
   #   check request.ask.maxSlotLoss == 1'u64
 
   test "node remembers purchase status after restart":
-    let cid = client1.upload("some file contents").get
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
     let id = client1.requestStorage(cid,
                                     duration=100.u256,
                                     reward=2.u256,
@@ -71,25 +73,12 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
     check request.ask.proofProbability == 3.u256
     check request.expiry == 30
     check request.ask.collateral == 200.u256
-    check request.ask.slots == 1'u64
+    check request.ask.slots == 2'u64
     check request.ask.maxSlotLoss == 0'u64
 
-  test "request storage fails if nodes and tolerance aren't correct":
-    let cid = client1.upload("some file contents").get
-    let responseBefore = client1.requestStorageRaw(cid,
-                          duration=100.u256,
-                          reward=2.u256,
-                          proofProbability=3.u256,
-                          expiry=30,
-                          collateral=200.u256,
-                          nodes=1,
-                          tolerance=1)
-
-    check responseBefore.status == "400 Bad Request"
-    check responseBefore.body == "Tolerance cannot be greater or equal than nodes (nodes - tolerance)"
-
   test "node requires expiry and its value to be in future":
-    let cid = client1.upload("some file contents").get
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
     let responseMissing = client1.requestStorageRaw(cid, duration=1.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256)
     check responseMissing.status == "400 Bad Request"
 
diff --git a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim
index 8c2c20e4a..1834dcf2f 100644
--- a/tests/integration/testrestapi.nim
+++ b/tests/integration/testrestapi.nim
@@ -1,7 +1,9 @@
+import std/httpclient
 import std/sequtils
 from pkg/libp2p import `==`
 import pkg/codex/units
 import ./twonodes
+import ../examples
 
 twonodessuite "REST API", debug1 = false, debug2 = false:
 
@@ -36,3 +38,93 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
 
     check:
       [cid1, cid2].allIt(it in list.content.mapIt(it.cid))
+
+  test "request storage fails for datasets that are too small":
+    let cid = client1.upload("some file contents").get
+    let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, nodes=2, collateral=200.u256, expiry=9)
+
+    check:
+      response.status == "400 Bad Request"
+      response.body == "Dataset too small for erasure parameters, need at least " & $(2*DefaultBlockSize.int) & " bytes"
+
+  test "request storage succeeds for sufficiently sized datasets":
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
+    let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256, expiry=9)
+
+    check:
+      response.status == "200 OK"
+
+  test "request storage fails if nodes and tolerance aren't correct":
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
+    let duration = 100.u256
+    let reward = 2.u256
+    let proofProbability = 3.u256
+    let expiry = 30.uint
+    let collateral = 200.u256
+    let ecParams = @[(1, 0), (1, 1), (2, 1), (3, 2), (3, 3)]
+
+    for ecParam in ecParams:
+      let (nodes, tolerance) = ecParam
+
+      var responseBefore = client1.requestStorageRaw(cid,
+        duration,
+        reward,
+        proofProbability,
+        collateral,
+        expiry,
+        nodes.uint,
+        tolerance.uint)
+
+      check responseBefore.status == "400 Bad Request"
+      check responseBefore.body == "Invalid parameters: parameters must satisfy `1 < (nodes - tolerance) ≥ tolerance`"
+
+  test "request storage fails if tolerance > nodes (underflow protection)":
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
+    let duration = 100.u256
+    let reward = 2.u256
+    let proofProbability = 3.u256
+    let expiry = 30.uint
+    let collateral = 200.u256
+    let ecParams = @[(0, 1), (1, 2), (2, 3)]
+
+    for ecParam in ecParams:
+      let (nodes, tolerance) = ecParam
+
+      var responseBefore = client1.requestStorageRaw(cid,
+        duration,
+        reward,
+        proofProbability,
+        collateral,
+        expiry,
+        nodes.uint,
+        tolerance.uint)
+
+      check responseBefore.status == "400 Bad Request"
+      check responseBefore.body == "Invalid parameters: `tolerance` cannot be greater than `nodes`"
+
+  test "request storage succeeds if nodes and tolerance within range":
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
+    let duration = 100.u256
+    let reward = 2.u256
+    let proofProbability = 3.u256
+    let expiry = 30.uint
+    let collateral = 200.u256
+    let ecParams = @[(2, 0), (3, 1), (5, 2)]
+
+    for ecParam in ecParams:
+      let (nodes, tolerance) = ecParam
+
+      var responseBefore = client1.requestStorageRaw(cid,
+        duration,
+        reward,
+        proofProbability,
+        collateral,
+        expiry,
+        nodes.uint,
+        tolerance.uint)
+
+      check responseBefore.status == "200 OK"