You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
/**
 * Creates and configures the Uppy instance used for avatar/archive uploads,
 * wires it to Companion-backed S3 multipart uploads, and stores it in Redux.
 *
 * Side effects: dispatches `setUppy(uppy)` plus status/alert actions from the
 * event handlers; calls the caller-provided `onUploadChange` and
 * `SetUploadStatus` callbacks.
 *
 * BUG FIX: the original read `await sec.getAccessTokenSilently()()` — the
 * trailing `()` invoked the *resolved token string* as a function, which
 * throws "... is not a function" before the uploader is ever configured.
 * `getAccessTokenSilently()` returns a Promise<string>, so a single call +
 * await is correct.
 */
const initializeAndSetUppy = async () => {
  const uppy = new Uppy({
    meta: { type: "avatar" },
    restrictions: {
      maxNumberOfFiles: 1,
      allowedFileTypes: [
        "application/zip",
        "application/x-zip-compressed",
        "application/x-rar-compressed",
        "text/plain",
      ],
    },
    autoProceed: true,
  });

  const accessToken = await sec.getAccessTokenSilently();

  uppy.use(AwsS3Multipart, {
    limit: 5,
    retryDelays: [0, 1000, 3000, 5000, 30000, 60000],
    companionUrl: companion,
    serverHeaders: { authorization: "Bearer " + accessToken },
    companionHeaders: { Authorization: "Bearer " + accessToken },
    // 20 MB parts.
    getChunkSize: () => {
      return 20000000;
    },
    contentDisposition: "attachment",
  });

  uppy.on("complete", (result) => {
    dispatch(setUploadComplete(true));
    SetUploadStatus(true);
    // ROBUSTNESS FIX: `complete` also fires when every file failed, in which
    // case `result.successful` is empty and the original code threw on
    // `result.successful[0]`.
    if (result.successful.length > 0) {
      onUploadChange(result.successful, result.successful[0].s3Multipart.key, true);
    }
  });

  uppy.on("upload-error", (file, error) => {
    dispatch(setError(true));
    SetUploadStatus(false);
    if (error.isNetworkError) {
      // Let your users know that file upload could have failed
      // due to firewall or ISP issues
      dispatch(setAlertMessage({
        message:
          "There is a network error. Please check your internet connection and try again later.",
        type: "failure",
        autoHideDuration: 20000,
      }));
    } else if (error.isAuthError) {
      logout({ returnTo: import.meta.env.VITE_APP_URL });
    } else {
      dispatch(setAlertMessage({
        message: "Cannot upload file",
        type: "failure",
        autoHideDuration: 20000,
      }));
    }
  });

  uppy.on("upload-progress", async (file, progress) => {
    setLoadingFile();
  });

  uppy.on("file-added", async (file) => {
    dispatch(setFileName(file.name));
  });

  uppy.on("file-removed", async (file) => {
    setInitialStatus();
    onUploadChange(file, "", false);
  });

  dispatch(setUppy(uppy));
};
// NOTE(review): the pasted snippet ended with an extra `}` — presumably the
// closing brace of an enclosing component/hook not shown here; confirm against
// the original file.
This is used in conjunction with Uppy Companion on the back-end.
The package from Uppy used in this service is:
"@uppy/companion": "^4.9.0",
The basic configuration there is
module.exports=({ config })=>{constoptions={s3: {getKey: (req,filename,metadata)=>{returns3name(filename,metadata);},key: config.aws.access_id,secret: config.aws.access_key,bucket: config.aws.s3_bucket,region: config.aws.region,endpoint: "https://s3."+config.aws.region+".amazonaws.com",signatureVersion: "v4",acl: "private",expires: 2*60*60,// Give it 2 X 60 minutes to account for very slow connections},uploadUrls: ["https://"+config.aws.s3_bucket+".s3."+config.aws.region+".amazonaws.com",],server: {host: config.api.port,path: config.aws.lb_path},filePath: ".",// Not needed for s3 but it insistssecret: config.uppy.secret,// Not for S3 either but it needs something setdebug: false,};const{app: companionApp}=companion.app(options);returncompanionApp;};
I have never had an issue with Uppy before and find it a great service that is provided via open source. Just spotted this as one of our clients mentioned an upload was hanging indefinitely in the browser and after a lot of debugging I was able to narrow it down to a change in the code in the aws-s3-multipart package.
Expected behavior
The upload-error event should be triggered if the upload fails beyond allotted retryDelays provided to AwsS3Multipart.
Actual behavior
The browser hangs indefinitely and the upload-error event is not triggered on loss of network connection. It looks to me like the requests are not being retried according to the provided retryDelays configuration.
I have been able to resolve this by reverting the code changes in #4691 in the aws-s3-multipart package.
When I use the below code (code with the changes in #4691 reverted), the upload-error event is triggered after all of the retryDelays have been exhausted.
all plugin code
// ---------------------------------------------------------------------------
// NOTE(review): this is the full source of the @uppy/aws-s3-multipart plugin
// (static VERSION = "3.10.0") as pasted into the issue, with the changes of
// PR #4691 reverted. The scraper stripped inter-token whitespace (e.g.
// `importBasePluginfrom"..."` is `import BasePlugin from "..."`), broke some
// line breaks inside `//` comments and string literals, and garbled the
// `${filename}` template interpolations into `$(unknown)` in the Companion
// URLs below — TODO confirm every detail against the published package source
// before reusing. Do not lint or execute this paste as-is.
//
// Structure (all on fused lines below): ES-module imports; helpers
// assertServerError / removeMetadataFromURL / getExpiry / getAllowedMetadata /
// throwIfAborted; class HTTPCommunicationQueue (retry + rate-limit logic);
// default export class AwsS3Multipart extends BasePlugin.
// ---------------------------------------------------------------------------
importBasePluginfrom"@uppy/core/lib/BasePlugin.js";import{RequestClient}from"@uppy/companion-client";importEventManagerfrom"@uppy/utils/lib/EventManager";import{RateLimitedQueue}from"@uppy/utils/lib/RateLimitedQueue";import{filterNonFailedFiles,filterFilesToEmitUploadStarted,}from"@uppy/utils/lib/fileFilters";import{createAbortError}from"@uppy/utils/lib/AbortController";importMultipartUploader,{pausingUploadReason}from"./MultipartUploader.js";importcreateSignedURLfrom"./createSignedURL.js";functionassertServerError(res){if(res&&res.error){consterror=newError(res.message);Object.assign(error,res.error);throwerror;}returnres;}functionremoveMetadataFromURL(urlString){consturlObject=newURL(urlString);urlObject.search="";urlObject.hash="";returnurlObject.href;}/** * Computes the expiry time for a request signed with temporary credentials. If * no expiration was provided, or an invalid value (e.g. in the past) is * provided, undefined is returned. This function assumes the client clock is in * sync with the remote server, which is a requirement for the signature to be * validated for AWS anyway. * * @param {import('../types/index.js').AwsS3STSResponse['credentials']} credentials * @returns {number | undefined} */functiongetExpiry(credentials){constexpirationDate=credentials.Expiration;if(expirationDate){consttimeUntilExpiry=Math.floor((newDate(expirationDate)-Date.now())/1000);if(timeUntilExpiry>9){returntimeUntilExpiry;}}returnundefined;}functiongetAllowedMetadata({ meta, allowedMetaFields, querify =false}){constmetaFields=allowedMetaFields??Object.keys(meta);if(!meta)return{};returnObject.fromEntries(metaFields.filter((key)=>meta[key]!=null).map((key)=>{constrealKey=querify ? `metadata[${key}]` : key;constvalue=String(meta[key]);return[realKey,value];}));}functionthrowIfAborted(signal){if(signal?.aborted){throwcreateAbortError("The operation was aborted",{cause: signal.reason,});}}classHTTPCommunicationQueue{
// Private state of HTTPCommunicationQueue: RateLimitedQueue-wrapped request
// functions (assigned in setOptions), the retryDelays iterator used by
// #shouldRetry, and #cache — a WeakMap keyed by `file.data` holding the
// createMultipartUpload promise/result per file.
#abortMultipartUpload;
#cache=newWeakMap();
#createMultipartUpload;
#fetchSignature;
#getUploadParameters;
#listParts;
#previousRetryDelay;
#requests;
#retryDelayIterator;
#sendCompletionRequest;
#setS3MultipartState;
#uploadPartBytes;
// Next fused line contains: the constructor; setOptions() which wraps each
// user-supplied request function in the RateLimitedQueue with priorities;
// and async #shouldRetry(err) — the retry decision logic this issue is about:
// it returns false (give up) when the retryDelays iterator is exhausted,
// waits and returns true otherwise, pauses the queue on 403 "Request has
// expired" / 429 / navigator offline. The `// TODO:` comment at the end of
// this line continues onto the following line (scrape artifact).
#getFile;constructor(requests,options,setS3MultipartState,getFile){this.#requests=requests;this.#setS3MultipartState=setS3MultipartState;this.#getFile=getFile;this.setOptions(options);}setOptions(options){constrequests=this.#requests;if("abortMultipartUpload"inoptions){this.#abortMultipartUpload=requests.wrapPromiseFunction(options.abortMultipartUpload,{priority: 1});}if("createMultipartUpload"inoptions){this.#createMultipartUpload=requests.wrapPromiseFunction(options.createMultipartUpload,{priority: -1});}if("signPart"inoptions){this.#fetchSignature=requests.wrapPromiseFunction(options.signPart);}if("listParts"inoptions){this.#listParts=requests.wrapPromiseFunction(options.listParts);}if("completeMultipartUpload"inoptions){this.#sendCompletionRequest=requests.wrapPromiseFunction(options.completeMultipartUpload,{priority: 1});}if("retryDelays"inoptions){this.#retryDelayIterator=options.retryDelays?.values();}if("uploadPartBytes"inoptions){this.#uploadPartBytes=requests.wrapPromiseFunction(options.uploadPartBytes,{priority: Infinity});}if("getUploadParameters"inoptions){this.#getUploadParameters=requests.wrapPromiseFunction(options.getUploadParameters);}}async #shouldRetry(err){constrequests=this.#requests;conststatus=err?.source?.status;// TODO: this retry logic is taken out of Tus. 
We should have a centralized place for retrying,// perhaps the rate limited queue, and dedupe all plugins with that.if(status==null){returnfalse;}if(status===403&&err.message==="Request has expired"){if(!requests.isPaused){// We don't want to exhaust the retryDelayIterator as long as there are// more than one request in parallel, to give slower connection a chance// to catch up with the expiry set in Companion.if(requests.limit===1||this.#previousRetryDelay==null){constnext=this.#retryDelayIterator?.next();if(next==null||next.done){returnfalse;}// If there are more than 1 request done in parallel, the RLQ limit is// decreased and the failed request is requeued after waiting for a bit.// If there is only one request in parallel, the limit can't be// decreased, so we iterate over `retryDelayIterator` as we do for// other failures.// `#previousRetryDelay` caches the value so we can re-use it next time.this.#previousRetryDelay=next.value;}// No need to stop the other requests, we just want to lower the limit.requests.rateLimit(0);awaitnewPromise((resolve)=>setTimeout(resolve,this.#previousRetryDelay));}}elseif(status===429){// HTTP 429 Too Many Requests => to avoid the whole download to fail, pause all requests.if(!requests.isPaused){constnext=this.#retryDelayIterator?.next();if(next==null||next.done){returnfalse;}requests.rateLimit(next.value);}}elseif(status>400&&status<500&&status!==409){// HTTP 4xx, the server won't send anything, it's doesn't make sense to retryreturnfalse;}elseif(typeofnavigator!=="undefined"&&navigator.onLine===false){// The navigator is offline, let's wait for it to come back online.if(!requests.isPaused){requests.pause();window.addEventListener("online",()=>{requests.resume();},{once: true});}}else{// Other error code means the request can be retried 
later.constnext=this.#retryDelayIterator?.next();if(next==null||next.done){returnfalse;}awaitnewPromise((resolve)=>setTimeout(resolve,next.value));}returntrue;}asyncgetUploadId(file,signal){letcachedResult;// As the cache is updated asynchronously, there could be a race condition// where we just miss a new result so we loop here until we get nothing back,// at which point it's out turn to create a new cache entry.while((cachedResult=this.#cache.get(file.data))!=null){try{returnawaitcachedResult;}catch{// In case of failure, we want to ignore the cached error.// At this point, either there's a new cached value, or we'll exit the loop a create a new one.}}constpromise=this.#createMultipartUpload(this.#getFile(file),signal);constabortPromise=()=>{promise.abort(signal.reason);this.#cache.delete(file.data);};signal.addEventListener("abort",abortPromise,{once: true});this.#cache.set(file.data,promise);promise.then(async(result)=>{signal.removeEventListener("abort",abortPromise);this.#setS3MultipartState(file,result);this.#cache.set(file.data,result);},()=>{signal.removeEventListener("abort",abortPromise);this.#cache.delete(file.data);});returnpromise;}asyncabortFileUpload(file){constresult=this.#cache.get(file.data);if(result==null){// If the createMultipartUpload request never was made, we don't// need to send the abortMultipartUpload request.return;}// Remove the cache entry right away for follow-up requests do not try to// use the soon-to-be aborted chached values.this.#cache.delete(file.data);this.#setS3MultipartState(file,Object.create(null));letawaitedResult;try{awaitedResult=awaitresult;}catch{// If the cached result rejects, there's nothing to abort.return;}awaitthis.#abortMultipartUpload(this.#getFile(file),awaitedResult);}async #nonMultipartUpload(file,chunk,signal){const{
method ="POST",
url,
fields,
headers,}=awaitthis.#getUploadParameters(this.#getFile(file),{
signal,}).abortOn(signal);letbody;constdata=chunk.getData();if(method.toUpperCase()==="POST"){constformData=newFormData();Object.entries(fields).forEach(([key,value])=>formData.set(key,value));formData.set("file",data);body=formData;}else{body=data;}const{ onProgress, onComplete }=chunk;constresult=awaitthis.#uploadPartBytes({signature: { url, headers, method },
body,size: data.size,
onProgress,
onComplete,
signal,}).abortOn(signal);return"location"inresult
? result
: {location: removeMetadataFromURL(url),
...result,};}/** * @param {import("@uppy/core").UppyFile} file * @param {import("../types/chunk").Chunk[]} chunks * @param {AbortSignal} signal * @returns {Promise<void>} */asyncuploadFile(file,chunks,signal){throwIfAborted(signal);if(chunks.length===1&&!chunks[0].shouldUseMultipart){returnthis.#nonMultipartUpload(file,chunks[0],signal);}const{ uploadId, key }=awaitthis.getUploadId(file,signal);throwIfAborted(signal);try{constparts=awaitPromise.all(chunks.map((chunk,i)=>this.uploadChunk(file,i+1,chunk,signal)));throwIfAborted(signal);returnawaitthis.#sendCompletionRequest(this.#getFile(file),{ key, uploadId, parts, signal },signal).abortOn(signal);}catch(err){if(err?.cause!==pausingUploadReason&&err?.name!=="AbortError"){// We purposefully don't wait for the promise and ignore its status,// because we want the error `err` to bubble up ASAP to report it to the// user. A failure to abort is not that big of a deal anyway.this.abortFileUpload(file);}throwerr;}}restoreUploadFile(file,uploadIdAndKey){this.#cache.set(file.data,uploadIdAndKey);}asyncresumeUploadFile(file,chunks,signal){throwIfAborted(signal);if(chunks.length===1&&chunks[0]!=null&&!chunks[0].shouldUseMultipart){returnthis.#nonMultipartUpload(file,chunks[0],signal);}const{ uploadId, key }=awaitthis.getUploadId(file,signal);throwIfAborted(signal);constalreadyUploadedParts=awaitthis.#listParts(this.#getFile(file),{ uploadId, key, signal },signal).abortOn(signal);throwIfAborted(signal);constparts=awaitPromise.all(chunks.map((chunk,i)=>{constpartNumber=i+1;constalreadyUploadedInfo=alreadyUploadedParts.find(({ PartNumber })=>PartNumber===partNumber);if(alreadyUploadedInfo==null){returnthis.uploadChunk(file,partNumber,chunk,signal);}// Already uploaded chunks are set to null. 
If we are restoring the upload, we need to mark it as already uploaded.chunk?.setAsUploaded?.();return{PartNumber: partNumber,ETag: alreadyUploadedInfo.ETag};}));throwIfAborted(signal);returnthis.#sendCompletionRequest(this.#getFile(file),{ key, uploadId, parts, signal },signal).abortOn(signal);}/** * * @param {import("@uppy/core").UppyFile} file * @param {number} partNumber * @param {import("../types/chunk").Chunk} chunk * @param {AbortSignal} signal * @returns {Promise<object>} */asyncuploadChunk(file,partNumber,chunk,signal){const{ uploadId, key }=awaitthis.getUploadId(file,signal);throwIfAborted(signal);for(;;){constchunkData=chunk.getData();const{ onProgress, onComplete }=chunk;constsignature=awaitthis.#fetchSignature(this.#getFile(file),{
uploadId,
key,
partNumber,body: chunkData,
signal,}).abortOn(signal);throwIfAborted(signal);try{return{PartNumber: partNumber,
...(awaitthis.#uploadPartBytes({
signature,body: chunkData,size: chunkData.size,
onProgress,
onComplete,
signal,}).abortOn(signal)),};}catch(err){if(!(awaitthis.#shouldRetry(err)))throwerr;}}}}exportdefaultclassAwsS3MultipartextendsBasePlugin{staticVERSION="3.10.0";
// Private fields of the AwsS3Multipart plugin class (opened at the end of the
// previous fused line): the HTTPCommunicationQueue instance and the Companion
// RequestClient.
#companionCommunicationQueue;
#client;constructor(uppy,opts){super(uppy,opts);this.type="uploader";this.id=this.opts.id||"AwsS3Multipart";this.title="AWS S3 Multipart";this.#client=newRequestClient(uppy,opts);constdefaultOptions={// TODO: null here means “include all”, [] means include none.// This is inconsistent with @uppy/aws-s3 and @uppy/transloaditallowedMetaFields: null,limit: 6,shouldUseMultipart: (file)=>file.size!==0,// TODO: Switch default to:// eslint-disable-next-line no-bitwise// shouldUseMultipart: (file) => file.size >> 10 >> 10 > 100,retryDelays: [0,1000,3000,5000],createMultipartUpload: this.createMultipartUpload.bind(this),listParts: this.listParts.bind(this),abortMultipartUpload: this.abortMultipartUpload.bind(this),completeMultipartUpload: this.completeMultipartUpload.bind(this),getTemporarySecurityCredentials: false,signPart: opts?.getTemporarySecurityCredentials
? this.createSignedURL.bind(this)
: this.signPart.bind(this),uploadPartBytes: AwsS3Multipart.uploadPartBytes,getUploadParameters: opts?.getTemporarySecurityCredentials
? this.createSignedURL.bind(this)
: this.getUploadParameters.bind(this),companionHeaders: {},};this.opts={ ...defaultOptions, ...opts};if(opts?.prepareUploadParts!=null&&opts.signPart==null){this.opts.signPart=async(file,{ uploadId, key, partNumber, body, signal })=>{const{ presignedUrls, headers }=awaitopts.prepareUploadParts(file,{
uploadId,
key,parts: [{number: partNumber,chunk: body}],
signal,});return{url: presignedUrls?.[partNumber],headers: headers?.[partNumber],};};}/** * Simultaneous upload limiting is shared across all uploads with this plugin. * * @type {RateLimitedQueue} */this.requests=this.opts.rateLimitedQueue??newRateLimitedQueue(this.opts.limit);this.#companionCommunicationQueue=newHTTPCommunicationQueue(this.requests,this.opts,this.#setS3MultipartState,this.#getFile);this.uploaders=Object.create(null);this.uploaderEvents=Object.create(null);this.uploaderSockets=Object.create(null);}[Symbol.for("uppy test: getClient")](){returnthis.#client;}setOptions(newOptions){this.#companionCommunicationQueue.setOptions(newOptions);super.setOptions(newOptions);this.#setCompanionHeaders();}/** * Clean up all references for a file's upload: the MultipartUploader instance, * any events related to the file, and the Companion WebSocket connection. * * Set `opts.abort` to tell S3 that the multipart upload is cancelled and must be removed. * This should be done when the user cancels the upload, not when the upload is completed or errored. */resetUploaderReferences(fileID,opts={}){if(this.uploaders[fileID]){this.uploaders[fileID].abort({really: opts.abort||false});this.uploaders[fileID]=null;}if(this.uploaderEvents[fileID]){this.uploaderEvents[fileID].remove();this.uploaderEvents[fileID]=null;}if(this.uploaderSockets[fileID]){this.uploaderSockets[fileID].close();this.uploaderSockets[fileID]=null;}}// TODO: make this a private method in the next majorassertHost(method){if(!this.opts.companionUrl){thrownewError(`Expected a \`companionUrl\` option containing a Companion address, or if you are not using Companion, a custom \`${method}\` implementation.`);}}createMultipartUpload(file,signal){this.assertHost("createMultipartUpload");throwIfAborted(signal);constmetadata=getAllowedMetadata({meta: file.meta,allowedMetaFields: this.opts.allowedMetaFields,});returnthis.#client.post("s3/multipart",{filename: file.name,type: file.type,
metadata,},{ signal }).then(assertServerError);}listParts(file,{ key, uploadId },signal){this.assertHost("listParts");throwIfAborted(signal);constfilename=encodeURIComponent(key);returnthis.#client.get(`s3/multipart/${uploadId}?key=$(unknown)`,{ signal }).then(assertServerError);}completeMultipartUpload(file,{ key, uploadId, parts },signal){this.assertHost("completeMultipartUpload");throwIfAborted(signal);constfilename=encodeURIComponent(key);constuploadIdEnc=encodeURIComponent(uploadId);returnthis.#client.post(`s3/multipart/${uploadIdEnc}/complete?key=$(unknown)`,{ parts },{ signal }).then(assertServerError);}/** * @type {import("../types").AwsS3STSResponse | Promise<import("../types").AwsS3STSResponse>} */
// STS credential caching: #getTemporarySecurityCredentials fetches temporary
// credentials (from Companion's s3/sts endpoint or a user callback), caches
// them, and schedules cache invalidation at half the remaining lifetime
// (`getExpiry(...) * 500` ms). createSignedURL builds a presigned PUT URL
// from those credentials. The `$(unknown)` fragments in the Companion URLs
// below are scrape-garbled `${filename}` interpolations — TODO confirm.
#cachedTemporaryCredentials;async #getTemporarySecurityCredentials(options){throwIfAborted(options?.signal);if(this.#cachedTemporaryCredentials==null){// We do not await it just yet, so concurrent calls do not try to override it:if(this.opts.getTemporarySecurityCredentials===true){this.assertHost("getTemporarySecurityCredentials");this.#cachedTemporaryCredentials=this.#client.get("s3/sts",null,options).then(assertServerError);}else{this.#cachedTemporaryCredentials=this.opts.getTemporarySecurityCredentials(options);}this.#cachedTemporaryCredentials=awaitthis.#cachedTemporaryCredentials;setTimeout(()=>{// At half the time left before expiration, we clear the cache. That's// an arbitrary tradeoff to limit the number of requests made to the// remote while limiting the risk of using an expired token in case the// clocks are not exactly synced.// The HTTP cache should be configured to ensure a client doesn't request// more tokens than it needs, but this timeout provides a second layer of// security in case the HTTP cache is disabled or misconfigured.this.#cachedTemporaryCredentials=null;},(getExpiry(this.#cachedTemporaryCredentials.credentials)||0)*500);}returnthis.#cachedTemporaryCredentials;}asynccreateSignedURL(file,options){constdata=awaitthis.#getTemporarySecurityCredentials(options);constexpires=getExpiry(data.credentials)||604_800;// 604 800 is the max value accepted by AWS.const{ uploadId, key, partNumber, signal }=options;// Return an object in the correct shape.return{method: "PUT",
expires,fields: {},url: `${awaitcreateSignedURL({accountKey: data.credentials.AccessKeyId,accountSecret: data.credentials.SecretAccessKey,sessionToken: data.credentials.SessionToken, expires,bucketName: data.bucket,Region: data.region,Key: key??`${crypto.randomUUID()}-${file.name}`, uploadId, partNumber, signal,})}`,// Provide content type header required by S3headers: {"Content-Type": file.type,},};}signPart(file,{ uploadId, key, partNumber, signal }){this.assertHost("signPart");throwIfAborted(signal);if(uploadId==null||key==null||partNumber==null){thrownewError("Cannot sign without a key, an uploadId, and a partNumber");}constfilename=encodeURIComponent(key);returnthis.#client.get(`s3/multipart/${uploadId}/${partNumber}?key=$(unknown)`,{ signal }).then(assertServerError);}abortMultipartUpload(file,{ key, uploadId },signal){this.assertHost("abortMultipartUpload");constfilename=encodeURIComponent(key);constuploadIdEnc=encodeURIComponent(uploadId);returnthis.#client.delete(`s3/multipart/${uploadIdEnc}?key=$(unknown)`,undefined,{
signal,}).then(assertServerError);}getUploadParameters(file,options){const{ meta }=file;const{ type,name: filename}=meta;constmetadata=getAllowedMetadata({
meta,allowedMetaFields: this.opts.allowedMetaFields,querify: true,});constquery=newURLSearchParams({ filename, type, ...metadata});returnthis.#client.get(`s3/params?${query}`,options);}staticasyncuploadPartBytes({signature: { url, expires, headers, method ="PUT"},
body,
size =body.size,
onProgress,
onComplete,
signal,}){throwIfAborted(signal);if(url==null){thrownewError("Cannot upload to an undefined URL");}returnnewPromise((resolve,reject)=>{constxhr=newXMLHttpRequest();xhr.open(method,url,true);if(headers){Object.keys(headers).forEach((key)=>{xhr.setRequestHeader(key,headers[key]);});}xhr.responseType="text";if(typeofexpires==="number"){xhr.timeout=expires*1000;}functiononabort(){xhr.abort();}functioncleanup(){signal.removeEventListener("abort",onabort);}signal.addEventListener("abort",onabort);xhr.upload.addEventListener("progress",(ev)=>{onProgress(ev);});xhr.addEventListener("abort",()=>{cleanup();reject(createAbortError());});xhr.addEventListener("timeout",()=>{cleanup();consterror=newError("Request has expired");error.source={status: 403};reject(error);});xhr.addEventListener("load",(ev)=>{cleanup();if(ev.target.status===403&&ev.target.responseText.includes("<Message>Request has expired</Message>")){consterror=newError("Request has expired");error.source=ev.target;reject(error);return;}if(ev.target.status<200||ev.target.status>=300){consterror=newError("Non 2xx");error.source=ev.target;reject(error);return;}// todo make a proper onProgress API (breaking change)onProgress?.({loaded: size,lengthComputable: true});// NOTE This must be allowed by CORS.constetag=ev.target.getResponseHeader("ETag");constlocation=ev.target.getResponseHeader("Location");if(method.toUpperCase()==="POST"&&location===null){// Not being able to read the Location header is not a fatal error.// eslint-disable-next-line no-consoleconsole.warn("AwsS3/Multipart: Could not read the Location header. This likely means CORS is not configured correctly on the S3 Bucket. See https://uppy.io/docs/aws-s3-multipart#S3-Bucket-Configuration for instructions.");}if(etag===null){reject(newError("AwsS3/Multipart: Could not read the ETag header. This likely means CORS is not configured correctly on the S3 Bucket. 
See https://uppy.io/docs/aws-s3-multipart#S3-Bucket-Configuration for instructions."));return;}onComplete?.(etag);resolve({ETag: etag,
...(location ? { location } : undefined),});});xhr.addEventListener("error",(ev)=>{cleanup();consterror=newError("Unknown error");error.source=ev.target;reject(error);});xhr.send(body);});}
// Stores the { key, uploadId } pair into the file's Uppy state (no-op if the
// file was removed from the store in the meantime).
#setS3MultipartState=(file,{ key, uploadId })=>{constcFile=this.uppy.getFile(file.id);if(cFile==null){// file was removed from storereturn;}this.uppy.setFileState(file.id,{s3Multipart: {
...cFile.s3Multipart,
key,
uploadId,},});};
// Returns the current store copy of the file, falling back to the argument.
#getFile=(file)=>{returnthis.uppy.getFile(file.id)||file;};
// Drives a local-file upload through MultipartUploader, wiring Uppy events
// (progress/error/success/part-uploaded, remove/cancel/pause/resume).
#uploadLocalFile(file){returnnewPromise((resolve,reject)=>{constonProgress=(bytesUploaded,bytesTotal)=>{this.uppy.emit("upload-progress",file,{uploader: this,
bytesUploaded,
bytesTotal,});};constonError=(err)=>{this.uppy.log(err);this.uppy.emit("upload-error",file,err);this.resetUploaderReferences(file.id);reject(err);};constonSuccess=(result)=>{constuploadResp={body: {
...result,},uploadURL: result.location,};this.resetUploaderReferences(file.id);this.uppy.emit("upload-success",this.#getFile(file),uploadResp);if(result.location){this.uppy.log(`Download ${file.name} from ${result.location}`);}resolve();};constonPartComplete=(part)=>{this.uppy.emit("s3-multipart:part-uploaded",this.#getFile(file),part);};constupload=newMultipartUploader(file.data,{// .bind to pass the file object to each handler.companionComm: this.#companionCommunicationQueue,log: (...args)=>this.uppy.log(...args),getChunkSize: this.opts.getChunkSize
? this.opts.getChunkSize.bind(this)
: null,
onProgress,
onError,
onSuccess,
onPartComplete,
file,shouldUseMultipart: this.opts.shouldUseMultipart,
...file.s3Multipart,});this.uploaders[file.id]=upload;consteventManager=newEventManager(this.uppy);this.uploaderEvents[file.id]=eventManager;eventManager.onFileRemove(file.id,(removed)=>{upload.abort();this.resetUploaderReferences(file.id,{abort: true});resolve(`upload ${removed.id} was removed`);});eventManager.onCancelAll(file.id,({ reason }={})=>{if(reason==="user"){upload.abort();this.resetUploaderReferences(file.id,{abort: true});}resolve(`upload ${file.id} was canceled`);});eventManager.onFilePause(file.id,(isPaused)=>{if(isPaused){upload.pause();}else{upload.start();}});eventManager.onPauseAll(file.id,()=>{upload.pause();});eventManager.onResumeAll(file.id,()=>{upload.start();});upload.start();});}// eslint-disable-next-line class-methods-use-this
#getCompanionClientArgs(file){return{
...file.remote.body,protocol: "s3-multipart",size: file.data.size,metadata: file.meta,};}
// The uploader entry point registered via uppy.addUploader: dispatches each
// file to #uploadLocalFile or, for remote files, to the Companion request
// client with an AbortController tied to file removal.
#upload=async(fileIDs)=>{if(fileIDs.length===0)returnundefined;constfiles=this.uppy.getFilesByIds(fileIDs);constfilesFiltered=filterNonFailedFiles(files);constfilesToEmit=filterFilesToEmitUploadStarted(filesFiltered);this.uppy.emit("upload-start",filesToEmit);constpromises=filesFiltered.map((file)=>{if(file.isRemote){constgetQueue=()=>this.requests;this.#setResumableUploadsCapability(false);constcontroller=newAbortController();constremovedHandler=(removedFile)=>{if(removedFile.id===file.id)controller.abort();};this.uppy.on("file-removed",removedHandler);constuploadPromise=this.uppy.getRequestClientForFile(file).uploadRemoteFile(file,this.#getCompanionClientArgs(file),{signal: controller.signal,
getQueue,});this.requests.wrapSyncFunction(()=>{this.uppy.off("file-removed",removedHandler);},{priority: -1})();returnuploadPromise;}returnthis.#uploadLocalFile(file);});constupload=awaitPromise.all(promises);// After the upload is done, another upload may happen with only local files.// We reset the capability so that the next upload can use resumable uploads.this.#setResumableUploadsCapability(true);returnupload;};
// Pushes this.opts.companionHeaders to the RequestClient (also runs as a
// pre-processor, see install()).
#setCompanionHeaders=()=>{this.#client.setCompanionHeaders(this.opts.companionHeaders);};
// Toggles the `resumableUploads` capability flag in the Uppy store.
#setResumableUploadsCapability=(boolean)=>{const{ capabilities }=this.uppy.getState();this.uppy.setState({capabilities: {
...capabilities,resumableUploads: boolean,},});};
// install()/uninstall() register and unregister the pre-processor, the
// uploader, and the cancel-all listener.
#resetResumableCapability=()=>{this.#setResumableUploadsCapability(true);};install(){this.#setResumableUploadsCapability(true);this.uppy.addPreProcessor(this.#setCompanionHeaders);this.uppy.addUploader(this.#upload);this.uppy.on("cancel-all",this.#resetResumableCapability);}uninstall(){this.uppy.removePreProcessor(this.#setCompanionHeaders);this.uppy.removeUploader(this.#upload);this.uppy.off("cancel-all",this.#resetResumableCapability);}}
The text was updated successfully, but these errors were encountered:
Hi @Murderlon @aduh95, thanks for assigning this for further investigation. Just wondering if you have had a chance to take a look in more detail, and whether this issue will be resolved in a future release?
This is also happening to me. I blocked the outgoing aws connection to get a timeout error to test the upload-error hook but it never triggers the hook.
Hello, I'm having trouble reproducing, are you able to reproduce consistently the issue or is there some flakiness involved? Are you trying to upload local files, or files from a remote provider (e.g. Google Drive)? What is the behavior on the DevTools? Do you see an infinite amount of requests being sent (and at what interval are they sent), or is it just one request pending indefinitely?
For the record, I tried to reproduce using yarn dev:with-companion:
I added uppyDashboard.on('upload-error', function(...args) {debugger}) in private/dev/Dashboard.js to detect if the event is emitted.
I configured Firefox DevTools to block all https://s3.us-east-1.amazonaws.com/* requests.
I started to upload a local file.
This is also happening to me. I blocked the outgoing aws connection to get a timeout error to test the upload-error hook but it never triggers the hook.
Can you clarify if you block the outgoing connections from the client or from Companion side?
I have resolved this issue by hardcoding a custom plugin using the code before the change I highlighted was implemented.
I was able to consistently reproduce the issue at the time. Our use case is for uploading large local files to s3 using pre-signed urls returned from companion.
On loss of network connection, the pending requests just remain in that state and the browser just hangs (with the upload progress spinner still displaying that the upload is in progress).
We do not block the outgoing connections from either side.
Initial checklist
Link to runnable example
No response
Steps to reproduce
The overview of our current configuration can be seen below.
The front-end React application used by users to provide files for upload
"@uppy/aws-s3-multipart": "^3.7.0",
"@uppy/core": "^3.6.1",
"@uppy/dashboard": "^3.6.0",
"@uppy/drag-drop": "^3.0.3",
"@uppy/file-input": "^3.0.4",
"@uppy/progress-bar": "^3.0.4",
"@uppy/react": "^3.1.4",`
This is used in conjunction with Uppy Companion on the back-end.
The package from Uppy used in this service is:
"@uppy/companion": "^4.9.0",
The basic configuration there is
I have never had an issue with Uppy before and find it a great service that is provided via open source. Just spotted this as one of our clients mentioned an upload was hanging indefinitely in the browser and after a lot of debugging I was able to narrow it down to a change in the code in the aws-s3-multipart package.
Expected behavior
The upload-error event should be triggered if the upload fails beyond allotted retryDelays provided to AwsS3Multipart.
Actual behavior
The browser hangs indefinitely and the upload-error event is not triggered on loss of network connection. It looks to me like the requests are not being retried according to the provided retryDelays configuration.
I have traced this issue to this change - #4691
I have been able to resolve this by reverting the code changes in #4691 in the aws-s3-multipart package.
When I use the below code (code with the changes in #4691 reverted), the upload-error event is triggered after all of the retryDelays have been exhausted.
all plugin code
The text was updated successfully, but these errors were encountered: