diff --git a/Makefile b/Makefile
index 3398c21ea..d0652973d 100644
--- a/Makefile
+++ b/Makefile
@@ -35,7 +35,7 @@ gosdk-build: gomod-download
go build -x -v -tags bn256 ./...
wasm-build: getrev
- CGO_ENABLED=0 GOOS=js GOARCH=wasm go build -buildvcs=false -o ./zcn.wasm ./wasmsdk
+ CGO_ENABLED=0 GOOS=js GOARCH=wasm go build -ldflags="-s -w" -buildvcs=false -o ./zcn.wasm ./wasmsdk
wasm-test: wasm-build
env -i $(shell go env) PATH="$(shell go env GOROOT)/misc/wasm:$(PATH)" CGO_ENABLED=0 GOOS=js GOARCH=wasm go test -v github.com/0chain/gosdk/wasmsdk/jsbridge/...
diff --git a/core/version/version.go b/core/version/version.go
index 1244037bc..e6f92099c 100644
--- a/core/version/version.go
+++ b/core/version/version.go
@@ -2,5 +2,5 @@
//====== THIS IS AUTOGENERATED FILE. DO NOT MODIFY ========
package version
-const VERSIONSTR = "v1.12.1-3-gbc68f654"
+const VERSIONSTR = "v1.14.0-RC5-5-g0ec7e4ca"
diff --git a/wasmsdk/blobber.go b/wasmsdk/blobber.go
index 0e7e52e02..8d240f069 100644
--- a/wasmsdk/blobber.go
+++ b/wasmsdk/blobber.go
@@ -667,10 +667,10 @@ func multiUpload(jsonBulkUploadOptions string) (MultiUploadResult, error) {
RemoteName: fileName,
RemotePath: fullRemotePath,
}
- numBlocks := option.NumBlocks
- if numBlocks <= 1 {
- numBlocks = 100
- }
+ numBlocks := option.NumBlocks
+ if numBlocks <= 1 {
+ numBlocks = 80
+ }
options := []sdk.ChunkedUploadOption{
sdk.WithThumbnail(option.ThumbnailBytes.Buffer),
@@ -931,3 +931,27 @@ func getBlobbers(stakable bool) ([]*sdk.Blobber, error) {
}
return blobbs, err
}
+
+// timingRes reports cumulative upload timing metrics (milliseconds) collected by the SDK.
+type timingRes struct {
+ UploadTime int64 `json:"upload_time"`
+ TotalReadChunkTime int64 `json:"total_read_chunk_time"`
+ ReadTime int64 `json:"read_time"`
+ TotalTime int64 `json:"total_time"`
+ TotalBuildTime int64 `json:"total_build_time"`
+ TotalBlobberUpload int64 `json:"total_blobber_upload"`
+}
+
+// getUploadTiming returns the SDK's upload timing counters serialized as JSON,
+// for consumption by the JS side of the wasm bridge.
+func getUploadTiming() string {
+ res := timingRes{
+ UploadTime: sdk.TotalUploadTime,
+ TotalReadChunkTime: sdk.TotalReadChunkTime,
+ ReadTime: sdk.TotalReadTime,
+ TotalTime: sdk.TotalTime,
+ TotalBuildTime: sdk.TotalFormBuildTime,
+ TotalBlobberUpload: sdk.TotalUploadBlobberTime,
+ }
+ respBytes, _ := json.Marshal(res) // timingRes holds only int64 fields; Marshal cannot fail here
+ return string(respBytes)
+}
diff --git a/wasmsdk/demo/index.html b/wasmsdk/demo/index.html
index d25c4178d..e5f8ea055 100644
--- a/wasmsdk/demo/index.html
+++ b/wasmsdk/demo/index.html
@@ -4,6 +4,7 @@
+
@@ -171,10 +172,10 @@
please download zcn.wasm from https://github.com/0chain/gosdk/releases/lates
}
const getWallet = () => {
- const clientID = "b1d533fa60431a76014c4f94a7e8e19a3b1a7f34eebd4cacd29a8dd948b3844c"
- const publicKey = "1f30a07b34146435cabc3244a1452fb8933f6982e0b33f384e5d25b9d6531e24e342003349990483f8481052a0748cbc72355d9cbda621ec914f7ed03c127791"
- const privateKey = "fb98f46969be6921586e547b0f6f70c6b92d7823359f00bafa3900523910661a"
- const mnemonic = "snake second property crush thrive monkey already lake fire sort cheap lake census adult this cloth panic filter taste punch pistol project rack obscure"
+ const clientID = "2ba00d37a162b3fe7783fd65c0507bca67418e9b2cac2a25244339f00ac30026"
+ const publicKey = "174cb917c5e5e5a7906b244bc73d03dc8adaf85a61d7c31989959ad1b4ffaa01884556b5e739f01bea08a329c14552e85540300484b24037962b730cca3dbd0c"
+ const privateKey = "8564a063420e98ee6826f56342b81a4ab3e5acab0b5bd5d9598b8c7207656415"
+ const mnemonic = "grunt human dog license demise black similar level satoshi hockey guide wrap drive chase lazy engage cruel fly unable verb point else minor where"
return {
clientID, publicKey, privateKey, mnemonic
}
@@ -211,7 +212,7 @@ please download zcn.wasm from https://github.com/0chain/gosdk/releases/lates
let network = query.get('network')
if (!network || network == 'undefined') {
- network = "dev.zus.network"
+ network = "demo.zus.network"
}
const blockWorker = 'https://' + network + '/dns';
@@ -441,13 +442,14 @@ please download zcn.wasm from https://github.com/0chain/gosdk/releases/lates
alert("please selection allocationID")
return
}
- const { list = [] } = await goWasm.sdk.listObjects(allocationId, '/')
+ const { list = [] } = await goWasm.sdk.listObjects(allocationId, '/',0,100)
files = list || []
bindFiles()
})
onClick('btnUploadFile', async () => {
const { files } = get('inputSelectedFile')
+ console.log("start upload")
if (files && files.length > 0) {
const objects = []
@@ -457,16 +459,26 @@ please download zcn.wasm from https://github.com/0chain/gosdk/releases/lates
allocationId: allocationId,
remotePath: `/${file.name}`,
file: file,
- thumbnailBytes: await readBytes(file),//only for demo, don't upload original file as thumbnail in production
encrypt: false,
webstreaming: false,
isUpdate: false,
isRepair: false,
- numBlocks: 100,
+ numBlocks: 250,
callback: function (totalBytes, completedBytes, error) {
console.log(file.name + " " + completedBytes + "/" + totalBytes + " err:" + error)
}
})
+ {{/* const startChunkSize = 3 * 64 * 1024;
+ let endOffset = startChunkSize + 335413248;
+ //Time this for loop
+ const startTime = new Date().getTime();
+ for (let j = 0; j < endOffset; j += startChunkSize) {
+ console.log("readChunk",j)
+ const chunk = await readChunk(j, startChunkSize, file);
+ } */}}
+
+ {{/* const endTime = new Date().getTime();
+ console.log('Time taken: ', endTime - startTime); */}}
}
const results = await goWasm.bulkUpload(objects)
console.log(JSON.stringify(results))
diff --git a/wasmsdk/demo/zcn.js b/wasmsdk/demo/zcn.js
index 3f5fd8e73..6d463b0e1 100644
--- a/wasmsdk/demo/zcn.js
+++ b/wasmsdk/demo/zcn.js
@@ -15,68 +15,66 @@
* along with this program. If not, see .
*/
-'use strict'
+'use strict';
-const g = window
+const g = window;
function hexStringToByte(str) {
- if (!str) return new Uint8Array()
+ if (!str) return new Uint8Array();
- const a = []
+ const a = [];
for (let i = 0, len = str.length; i < len; i += 2) {
- a.push(parseInt(str.substr(i, 2), 16))
+ a.push(parseInt(str.substr(i, 2), 16));
}
- return new Uint8Array(a)
+ return new Uint8Array(a);
}
function blsSign(hash, secretKey) {
- const { jsProxy } = g.__zcn_wasm__
+ const { jsProxy } = g.__zcn_wasm__;
if (!jsProxy || !secretKey) {
- const errMsg = 'err: bls.secretKey is not initialized'
- console.warn(errMsg)
- throw new Error(errMsg)
+ const errMsg = 'err: bls.secretKey is not initialized';
+ console.warn(errMsg);
+ throw new Error(errMsg);
}
- const bytes = hexStringToByte(hash)
- const sk = bls.deserializeHexStrToSecretKey(secretKey)
- const sig = sk.sign(bytes)
+ const bytes = hexStringToByte(hash);
+ const sk = bls.deserializeHexStrToSecretKey(secretKey);
+ const sig = sk.sign(bytes);
if (!sig) {
- const errMsg = 'err: wasm blsSign function failed to sign transaction'
- console.warn(errMsg)
- throw new Error(errMsg)
+ const errMsg = 'err: wasm blsSign function failed to sign transaction';
+ console.warn(errMsg);
+ throw new Error(errMsg);
}
- return sig.serializeToHexStr()
+ return sig.serializeToHexStr();
}
async function createObjectURL(buf, mimeType) {
- var blob = new Blob([buf], { type: mimeType })
- return URL.createObjectURL(blob)
+ var blob = new Blob([buf], { type: mimeType });
+ return URL.createObjectURL(blob);
}
-
const readChunk = (offset, chunkSize, file) =>
- new Promise((res,rej) => {
- const fileReader = new FileReader()
- const blob = file.slice(offset, chunkSize+offset)
- fileReader.onload = e => {
- const t = e.target
+ new Promise((res, rej) => {
+ const fileReader = new FileReader();
+ const blob = file.slice(offset, chunkSize + offset);
+ fileReader.onload = (e) => {
+ const t = e.target;
if (t.error == null) {
res({
size: t.result.byteLength,
- buffer: new Uint8Array(t.result)
- })
- }else{
- rej(t.error)
+ buffer: new Uint8Array(t.result),
+ });
+ } else {
+ rej(t.error);
}
- }
-
- fileReader.readAsArrayBuffer(blob)
- })
+ };
+ fileReader.readAsArrayBuffer(blob);
+ });
/**
* Sleep is used when awaiting for Go Wasm to initialize.
@@ -89,12 +87,10 @@ const readChunk = (offset, chunkSize, file) =>
* completed.
*/
const sleep = (ms = 1000) =>
- new Promise(res => {
- requestAnimationFrame(res)
- setTimeout(res, ms)
- })
-
-
+ new Promise((res) => {
+ requestAnimationFrame(res);
+ setTimeout(res, ms);
+ });
/**
* The maximum amount of time that we would expect Wasm to take to initialize.
@@ -102,12 +98,12 @@ const sleep = (ms = 1000) =>
* Most likely something has gone wrong if it takes more than 3 seconds to
* initialize.
*/
-const maxTime = 10 * 1000
+const maxTime = 10 * 1000;
// Initialize __zcn_wasm__
g.__zcn_wasm__ = g.__zcn_wasm_ || {
- glob:{
- index:0,
+ glob: {
+ index: 0,
},
jsProxy: {
secretKey: null,
@@ -119,12 +115,12 @@ g.__zcn_wasm__ = g.__zcn_wasm_ || {
sleep,
},
sdk: {}, //proxy object for go to expose its methods
-}
+};
/**
* bridge is an easier way to refer to the Go WASM object.
*/
-const bridge = g.__zcn_wasm__
+const bridge = g.__zcn_wasm__;
// bulk upload files with FileReader
// objects: the list of upload object
@@ -138,99 +134,125 @@ const bridge = g.__zcn_wasm__
// - numBlocks: int
// - callback: function(totalBytes,completedBytes,error)
async function bulkUpload(options) {
- const start = bridge.glob.index
- const opts = options.map(obj=>{
+ console.log('func called');
+ const start = bridge.glob.index;
+ // let hasher = await hashwasm.createMD5();
+ // hasher.init();
+ const opts = options.map((obj) => {
const i = bridge.glob.index;
- bridge.glob.index++
- const readChunkFuncName = "__zcn_upload_reader_"+i.toString()
- const callbackFuncName = "__zcn_upload_callback_"+i.toString()
- g[readChunkFuncName] = async (offset,chunkSize) => {
- console.log("multi_upload: read chunk remotePath:"+ obj.remotePath + " offset:"+offset+" chunkSize:"+chunkSize)
- const chunk = await readChunk(offset,chunkSize,obj.file)
- return chunk.buffer
- }
-
- if(obj.callback) {
- g[callbackFuncName] = async (totalBytes,completedBytes,error)=> obj.callback(totalBytes,completedBytes,error)
+ bridge.glob.index++;
+ const readChunkFuncName = '__zcn_upload_reader_' + i.toString();
+ const callbackFuncName = '__zcn_upload_callback_' + i.toString();
+ g[readChunkFuncName] = async (offset, chunkSize) => {
+ console.log(
+ 'multi_upload: read chunk remotePath:' +
+ obj.remotePath +
+ ' offset:' +
+ offset +
+ ' chunkSize:' +
+ chunkSize
+ );
+ const chunk = await readChunk(offset, chunkSize, obj.file);
+ // hasher.update(chunk.buffer);
+ // if (chunk.size < chunkSize) {
+ // const hash = hasher.digest();
+ // console.log('multi_upload: hash ' + hash);
+ // }
+ return chunk.buffer;
+ };
+
+ if (obj.callback) {
+ g[callbackFuncName] = async (totalBytes, completedBytes, error) =>
+ obj.callback(totalBytes, completedBytes, error);
}
return {
- allocationId:obj.allocationId,
- remotePath:obj.remotePath,
- readChunkFuncName:readChunkFuncName,
+ allocationId: obj.allocationId,
+ remotePath: obj.remotePath,
+ readChunkFuncName: readChunkFuncName,
fileSize: obj.file.size,
- thumbnailBytes:obj.thumbnailBytes?obj.thumbnailBytes.toString():"",
- encrypt:obj.encrypt,
- webstreaming:obj.webstreaming,
- isUpdate:obj.isUpdate,
- isRepair:obj.isRepair,
- numBlocks:obj.numBlocks,
- callbackFuncName:callbackFuncName
- }
- })
-
- const end = bridge.glob.index
- const result = await bridge.__proxy__.sdk.multiUpload(JSON.stringify(opts))
- for (let i=start; i {
- const source = await (await resp).arrayBuffer()
- return await WebAssembly.instantiate(source, importObject)
- }
+ const source = await (await resp).arrayBuffer();
+ return await WebAssembly.instantiate(source, importObject);
+ };
}
const result = await WebAssembly.instantiateStreaming(
await fetch('zcn.wasm'),
go.importObject
- )
+ );
setTimeout(() => {
if (g.__zcn_wasm__?.__wasm_initialized__ !== true) {
console.warn(
'wasm window.__zcn_wasm__ (zcn.__wasm_initialized__) still not true after max time'
- )
+ );
}
- }, maxTime)
+ }, maxTime);
- go.run(result.instance)
+ go.run(result.instance);
}
async function createWasm() {
if (bridge.__proxy__) {
- return bridge.__proxy__
+ return bridge.__proxy__;
}
- const go = new g.Go()
+ const go = new g.Go();
- loadWasm(go)
+ loadWasm(go);
const sdkGet =
(_, key) =>
@@ -274,74 +296,69 @@ async function createWasm() {
// eslint-disable-next-line
new Promise(async (resolve, reject) => {
if (!go || go.exited) {
- return reject(new Error('The Go instance is not active.'))
+ return reject(new Error('The Go instance is not active.'));
}
while (bridge.__wasm_initialized__ !== true) {
- await sleep(1000)
+ await sleep(1000);
}
if (typeof bridge.sdk[key] !== 'function') {
- resolve(bridge.sdk[key])
+ resolve(bridge.sdk[key]);
if (args.length !== 0) {
reject(
new Error(
'Retrieved value from WASM returned function type, however called with arguments.'
)
- )
+ );
}
- return
+ return;
}
try {
- let resp = bridge.sdk[key].apply(undefined, args)
+ let resp = bridge.sdk[key].apply(undefined, args);
// support wasm.BindAsyncFunc
if (resp && typeof resp.then === 'function') {
- resp = await Promise.race([resp])
+ resp = await Promise.race([resp]);
}
if (resp && resp.error) {
- reject(resp.error)
+ reject(resp.error);
} else {
- resolve(resp)
+ resolve(resp);
}
} catch (e) {
- reject(e)
+ reject(e);
}
- })
+ });
const sdkProxy = new Proxy(
- {
-
- },
+ {},
{
get: sdkGet,
}
- )
+ );
const jsProxy = new Proxy(
{},
{
get: (_, key) => bridge.jsProxy[key],
set: (_, key, value) => {
- bridge.jsProxy[key] = value
+ bridge.jsProxy[key] = value;
},
}
- )
+ );
const proxy = {
bulkUpload: bulkUpload,
setWallet: setWallet,
sdk: sdkProxy, //expose sdk methods for js
jsProxy, //expose js methods for go
- }
+ };
- bridge.__proxy__ = proxy
+ bridge.__proxy__ = proxy;
- return proxy
+ return proxy;
}
-
-
-
diff --git a/wasmsdk/jsbridge/file_reader.go b/wasmsdk/jsbridge/file_reader.go
index 22a88ad64..9d2da5630 100644
--- a/wasmsdk/jsbridge/file_reader.go
+++ b/wasmsdk/jsbridge/file_reader.go
@@ -5,6 +5,7 @@ package jsbridge
import (
"errors"
+ "fmt"
"io"
"syscall/js"
)
@@ -40,13 +41,15 @@ func NewFileReader(readChunkFuncName string, fileSize, chunkReadSize int64) (*Fi
if n < len(buf) {
return nil, errors.New("file_reader: failed to read first chunk")
}
- return &FileReader{
+ fr := &FileReader{
size: fileSize,
offset: int64(n),
readChunk: readChunk,
buf: buf,
endOfFile: n == int(fileSize),
- }, nil
+ }
+ fmt.Println("file_reader: created", fr.size, fr.offset, fr.endOfFile)
+ return fr, nil
}
func (r *FileReader) Read(p []byte) (int, error) {
@@ -68,12 +71,15 @@ func (r *FileReader) Read(p []byte) (int, error) {
if n < len(r.buf) {
r.buf = r.buf[:n]
r.endOfFile = true
+ fmt.Println("file_reader: reached end of file", r.offset, r.size)
}
}
n := copy(p, r.buf[r.bufOffset:])
+ fmt.Println("file_reader: read", r.offset, r.bufOffset, len(r.buf), len(p), n)
r.bufOffset += n
if r.endOfFile && r.bufOffset == len(r.buf) {
+ fmt.Println("file_reader: last call", r.offset, r.bufOffset, len(r.buf), len(p), n)
return n, io.EOF
}
@@ -96,6 +102,10 @@ func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
if abs < 0 {
return 0, errors.New("FileReader.Seek: negative position")
}
- r.offset = abs
+ if abs > int64(len(r.buf)) {
+ return 0, errors.New("FileReader.Seek: position out of bounds")
+ }
+ r.bufOffset = int(abs)
+ fmt.Println("file_reader: seek", r.bufOffset, abs)
return abs, nil
}
diff --git a/wasmsdk/proxy.go b/wasmsdk/proxy.go
index fca2681f3..de3c6962b 100644
--- a/wasmsdk/proxy.go
+++ b/wasmsdk/proxy.go
@@ -187,6 +187,7 @@ func main() {
"updateForbidAllocation": UpdateForbidAllocation,
"send": send,
"cancelUpload": cancelUpload,
+ "uploadTiming": getUploadTiming,
// player
"play": play,
diff --git a/zboxcore/sdk/allocation.go b/zboxcore/sdk/allocation.go
index 83349ea6f..90b7f9e1e 100644
--- a/zboxcore/sdk/allocation.go
+++ b/zboxcore/sdk/allocation.go
@@ -813,6 +813,13 @@ func (a *Allocation) DoMultiOperation(operations []OperationRequest, opts ...Mul
if !a.isInitialized() {
return notInitialized
}
+ TotalFormBuildTime = 0 // reset the time
+ TotalUploadTime = 0 // reset the time
+ TotalReadChunkTime = 0 // reset the time
+ TotalUploadBlobberTime = 0 // reset the time
+ TotalTime = 0 // reset the time
+ TotalReadTime = 0 // reset the time
+
connectionID := zboxutil.NewConnectionId()
var mo MultiOperation
for i := 0; i < len(operations); {
diff --git a/zboxcore/sdk/chunked_upload.go b/zboxcore/sdk/chunked_upload.go
index aa4b9729a..67a39f921 100644
--- a/zboxcore/sdk/chunked_upload.go
+++ b/zboxcore/sdk/chunked_upload.go
@@ -47,7 +47,7 @@ var (
ErrNoEnoughSpaceLeftInAllocation = errors.New("alloc: no enough space left in allocation")
CancelOpCtx = make(map[string]context.CancelCauseFunc)
cancelLock sync.Mutex
- CurrentMode = UploadModeMedium
+ CurrentMode = UploadModeHigh
)
// DefaultChunkSize default chunk size for file and thumbnail
@@ -440,9 +440,9 @@ func (su *ChunkedUpload) process() error {
defer su.chunkReader.Close()
defer su.ctxCncl(nil)
for {
-
+ now := time.Now()
chunks, err := su.readChunks(su.chunkNumber)
-
+ TotalReadTime += time.Since(now).Milliseconds()
// chunk, err := su.chunkReader.Next()
if err != nil {
if su.statusCallback != nil {
@@ -643,7 +643,7 @@ func (su *ChunkedUpload) processUpload(chunkStartIndex, chunkEndIndex int,
uploadBody: make([]blobberData, len(su.blobbers)),
uploadLength: uploadLength,
}
-
+ now := time.Now()
wgErrors := make(chan error, len(su.blobbers))
if len(fileShards) == 0 {
return thrown.New("upload_failed", "Upload failed. No data to upload")
@@ -697,6 +697,7 @@ func (su *ChunkedUpload) processUpload(chunkStartIndex, chunkEndIndex int,
su.removeProgress()
return thrown.New("upload_failed", fmt.Sprintf("Upload failed. %s", err))
}
+ TotalFormBuildTime += time.Since(now).Milliseconds()
if !lastBufferOnly {
su.uploadWG.Add(1)
select {
@@ -817,6 +818,7 @@ func (su *ChunkedUpload) uploadToBlobbers(uploadData UploadData) error {
return context.Cause(su.ctx)
default:
}
+ now := time.Now()
consensus := Consensus{
RWMutex: &sync.RWMutex{},
consensusThresh: su.consensus.consensusThresh,
@@ -873,6 +875,7 @@ func (su *ChunkedUpload) uploadToBlobbers(uploadData UploadData) error {
su.statusCallback.InProgress(su.allocationObj.ID, su.fileMeta.RemotePath, su.opCode, int(atomic.AddInt64(&su.progress.UploadLength, uploadLength)), nil)
}
}
+ atomic.AddInt64(&TotalUploadBlobberTime, time.Since(now).Milliseconds())
uploadData = UploadData{} // release memory
return nil
}
diff --git a/zboxcore/sdk/chunked_upload_blobber.go b/zboxcore/sdk/chunked_upload_blobber.go
index 886464aff..1d021084b 100644
--- a/zboxcore/sdk/chunked_upload_blobber.go
+++ b/zboxcore/sdk/chunked_upload_blobber.go
@@ -9,7 +9,6 @@ import (
"mime/multipart"
"net/http"
"strings"
- "syscall"
"time"
"github.com/0chain/errors"
@@ -22,7 +21,6 @@ import (
"github.com/0chain/gosdk/zboxcore/logger"
"github.com/0chain/gosdk/zboxcore/marker"
"github.com/0chain/gosdk/zboxcore/zboxutil"
- "github.com/hitenjain14/fasthttp"
"golang.org/x/sync/errgroup"
)
@@ -73,40 +71,47 @@ func (sb *ChunkedUploadBlobber) sendUploadRequest(
for dataInd := 0; dataInd < len(dataBuffers); dataInd++ {
ind := dataInd
eg.Go(func() error {
- var (
- shouldContinue bool
- )
- var req *fasthttp.Request
+ req, err := zboxutil.NewUploadRequestWithMethod(
+ sb.blobber.Baseurl, su.allocationObj.ID, su.allocationObj.Tx, dataBuffers[ind], su.httpMethod)
+ if err != nil {
+ return err
+ }
+ req.Header.Add("Content-Type", contentSlice[ind])
for i := 0; i < 3; i++ {
- req, err = zboxutil.NewFastUploadRequest(
- sb.blobber.Baseurl, su.allocationObj.ID, su.allocationObj.Tx, dataBuffers[ind].Bytes(), su.httpMethod)
- if err != nil {
- return err
- }
+ err, shouldContinue := func() (err error, shouldContinue bool) {
+ reqCtx, ctxCncl := context.WithTimeout(ctx, su.uploadTimeOut)
+ var resp *http.Response
+ err = zboxutil.HttpDo(reqCtx, ctxCncl, req, func(r *http.Response, err error) error {
+ resp = r
+ return err
+ })
+ defer ctxCncl()
- req.Header.Add("Content-Type", contentSlice[ind])
- err, shouldContinue = func() (err error, shouldContinue bool) {
- resp := fasthttp.AcquireResponse()
- defer fasthttp.ReleaseResponse(resp)
- err = zboxutil.FastHttpClient.DoTimeout(req, resp, su.uploadTimeOut)
- fasthttp.ReleaseRequest(req)
if err != nil {
logger.Logger.Error("Upload : ", err)
- if errors.Is(err, fasthttp.ErrConnectionClosed) || errors.Is(err, syscall.EPIPE) {
- return err, true
- }
return fmt.Errorf("Error while doing reqeust. Error %s", err), false
}
- if resp.StatusCode() == http.StatusOK {
+ if resp.Body != nil {
+ defer resp.Body.Close()
+ }
+ if resp.StatusCode == http.StatusOK {
+ io.Copy(io.Discard, resp.Body) //nolint:errcheck
return
}
+ var r UploadResult
+ var respbody []byte
+
+ respbody, err = io.ReadAll(resp.Body)
+ if err != nil {
+ logger.Logger.Error("Error: Resp ", err)
+ return fmt.Errorf("Error while reading body. Error %s", err), false
+ }
- respbody := resp.Body()
- if resp.StatusCode() == http.StatusTooManyRequests {
+ if resp.StatusCode == http.StatusTooManyRequests {
logger.Logger.Error("Got too many request error")
var r int
- r, err = zboxutil.GetFastRateLimitValue(resp)
+ r, err = zboxutil.GetRateLimitValue(resp)
if err != nil {
logger.Logger.Error(err)
return
@@ -116,25 +121,32 @@ func (sb *ChunkedUploadBlobber) sendUploadRequest(
return
}
- msg := string(respbody)
- logger.Logger.Error(sb.blobber.Baseurl,
- " Upload error response: ", resp.StatusCode(),
- "err message: ", msg)
- err = errors.Throw(constants.ErrBadRequest, msg)
+ if resp.StatusCode != http.StatusOK {
+ msg := string(respbody)
+ logger.Logger.Error(sb.blobber.Baseurl,
+ " Upload error response: ", resp.StatusCode,
+ "err message: ", msg)
+ err = errors.Throw(constants.ErrBadRequest, msg)
+ return
+ }
+
+ err = json.Unmarshal(respbody, &r)
+ if err != nil {
+ logger.Logger.Error(sb.blobber.Baseurl, "Upload response parse error: ", err)
+ return
+ }
return
}()
- if shouldContinue {
- continue
- }
-
if err != nil {
return err
}
-
+ if shouldContinue {
+ continue
+ }
break
}
- return err
+ return nil
})
}
err = eg.Wait()
diff --git a/zboxcore/sdk/chunked_upload_chunk_reader.go b/zboxcore/sdk/chunked_upload_chunk_reader.go
index 38570ec98..ecba438c5 100644
--- a/zboxcore/sdk/chunked_upload_chunk_reader.go
+++ b/zboxcore/sdk/chunked_upload_chunk_reader.go
@@ -5,6 +5,7 @@ import (
"math"
"strconv"
"sync"
+ "time"
"github.com/0chain/errors"
"github.com/0chain/gosdk/constants"
@@ -158,11 +159,13 @@ func (r *chunkedUploadChunkReader) Next() (*ChunkData, error) {
readLen int
err error
)
+ now := time.Now()
for readLen < len(chunkBytes) && err == nil {
var nn int
nn, err = r.fileReader.Read(chunkBytes[readLen:])
readLen += nn
}
+ TotalReadChunkTime += time.Since(now).Milliseconds()
if err != nil {
if !errors.Is(err, io.EOF) {
diff --git a/zboxcore/sdk/multi_operation_worker.go b/zboxcore/sdk/multi_operation_worker.go
index ca71709a5..904116973 100644
--- a/zboxcore/sdk/multi_operation_worker.go
+++ b/zboxcore/sdk/multi_operation_worker.go
@@ -40,6 +40,15 @@ func WithRepair() MultiOperationOption {
}
}
+var (
+ TotalUploadTime int64
+ TotalTime int64
+ TotalReadTime int64
+ TotalReadChunkTime int64
+ TotalFormBuildTime int64
+ TotalUploadBlobberTime int64
+)
+
type Operationer interface {
Process(allocObj *Allocation, connectionID string) ([]fileref.RefEntity, zboxutil.Uint128, error)
buildChange(refs []fileref.RefEntity, uid uuid.UUID) []allocationchange.AllocationChange
@@ -160,6 +169,8 @@ func (mo *MultiOperation) createConnectionObj(blobberIdx int) (err error) {
func (mo *MultiOperation) Process() error {
l.Logger.Info("MultiOperation Process start")
+ TotalReadTime = 0
+ TotalReadChunkTime = 0
wg := &sync.WaitGroup{}
mo.changes = make([][]allocationchange.AllocationChange, len(mo.operations))
ctx := mo.ctx
@@ -168,6 +179,7 @@ func (mo *MultiOperation) Process() error {
swg := sizedwaitgroup.New(BatchSize)
errsSlice := make([]error, len(mo.operations))
mo.operationMask = zboxutil.NewUint128(0)
+ now := time.Now()
for idx, op := range mo.operations {
uid := util.GetNewUUID()
swg.Add()
@@ -196,7 +208,8 @@ func (mo *MultiOperation) Process() error {
}(op, idx)
}
swg.Wait()
-
+ logger.Logger.Info("[process]", time.Since(now).Milliseconds())
+ TotalUploadTime = time.Since(now).Milliseconds()
// Check consensus
if mo.operationMask.CountOnes() < mo.consensusThresh || ctx.Err() != nil {
majorErr := zboxutil.MajorError(errsSlice)
@@ -353,6 +366,7 @@ func (mo *MultiOperation) Process() error {
op.Completed(mo.allocationObj)
}
}
+ TotalTime = time.Since(now).Milliseconds()
return nil
diff --git a/zboxcore/zboxutil/http.go b/zboxcore/zboxutil/http.go
index e52eb9156..fcd95e74e 100644
--- a/zboxcore/zboxutil/http.go
+++ b/zboxcore/zboxutil/http.go
@@ -646,18 +646,15 @@ func NewFastUploadRequest(baseURL, allocationID string, allocationTx string, bod
return req, nil
}
-func NewUploadRequest(baseUrl, allocationID string, allocationTx string, body io.Reader, update bool) (*http.Request, error) {
+func NewUploadRequest(baseUrl, allocationID string, allocationTx string, body io.Reader, method string) (*http.Request, error) {
u, err := joinUrl(baseUrl, UPLOAD_ENDPOINT, allocationTx)
if err != nil {
return nil, err
}
var req *http.Request
- if update {
- req, err = http.NewRequest(http.MethodPut, u.String(), body)
- } else {
- req, err = http.NewRequest(http.MethodPost, u.String(), body)
- }
+ req, err = http.NewRequest(method, u.String(), body)
+
if err != nil {
return nil, err
}
diff --git a/zboxcore/zboxutil/transport.go b/zboxcore/zboxutil/transport.go
index 7cf7d4d5b..c05ace567 100644
--- a/zboxcore/zboxutil/transport.go
+++ b/zboxcore/zboxutil/transport.go
@@ -22,4 +22,5 @@ var DefaultTransport = &http.Transport{
ExpectContinueTimeout: 1 * time.Second,
MaxIdleConnsPerHost: 25,
WriteBufferSize: 16 * 1024,
+ ForceAttemptHTTP2: true,
}
diff --git a/zboxcore/zboxutil/transport_wasm.go b/zboxcore/zboxutil/transport_wasm.go
index 553f6d807..89c3019e6 100644
--- a/zboxcore/zboxutil/transport_wasm.go
+++ b/zboxcore/zboxutil/transport_wasm.go
@@ -16,4 +16,5 @@ var DefaultTransport = &http.Transport{
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
MaxIdleConnsPerHost: 100,
+ ForceAttemptHTTP2: true,
}