@leefsmp
Last active August 16, 2022 06:47
Forge resumable upload (node.js)
/////////////////////////////////////////////////////////
// Uploads object to bucket using resumable endpoint
// Requires: fs and eachLimit from the 'async' package
/////////////////////////////////////////////////////////
uploadObjectChunked (
  getToken,
  bucketKey, objectKey,
  file,
  opts = {}) {

  return new Promise((resolve, reject) => {

    const chunkSize = opts.chunkSize || 5 * 1024 * 1024

    const nbChunks = Math.ceil(file.size / chunkSize)

    const chunksMap = Array.from({
      length: nbChunks
    }, (e, i) => i)

    // generates a unique session ID
    const sessionId = this.guid()

    // prepares the upload tasks, one per chunk
    const uploadTasks = chunksMap.map((chunkIdx) => {

      const start = chunkIdx * chunkSize

      const end = Math.min(
        file.size, (chunkIdx + 1) * chunkSize) - 1

      const range = `bytes ${start}-${end}/${file.size}`

      const length = end - start + 1

      const readStream =
        fs.createReadStream(file.path, {
          start, end
        })

      const run = async () => {

        const token = await getToken()

        return this._objectsAPI.uploadChunk(
          bucketKey, objectKey,
          length, range, sessionId,
          readStream, {},
          {autoRefresh: false}, token)
      }

      return {
        chunkIndex: chunkIdx,
        run
      }
    })

    let progress = 0

    // runs the upload tasks asynchronously in parallel;
    // the number of simultaneous uploads is defined by
    // opts.concurrentUploads
    eachLimit(uploadTasks, opts.concurrentUploads || 3,
      (task, callback) => {

        task.run().then((res) => {

          progress += 100.0 / nbChunks

          if (opts.onProgress) {
            opts.onProgress({
              progress: Math.round(progress * 100) / 100,
              chunkIndex: task.chunkIndex
            })
          }

          callback()

        }, (err) => {

          if (opts.onError) {
            opts.onError(err)
          }

          callback(err)
        })

      }, (err) => {

        if (!err && opts.onComplete) {
          opts.onComplete()
        }
      })

    // resolves immediately with the upload metadata;
    // completion and errors are reported via the opts callbacks
    resolve({
      fileSize: file.size,
      bucketKey,
      objectKey,
      nbChunks
    })
  })
}
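
For reference, a minimal usage sketch. It assumes this method lives on a service class exposing this._objectsAPI (a Forge ObjectsApi instance) and this.guid(), and that getToken returns a valid token; the svc and filePath names are hypothetical, and the file argument only needs path and size properties:

const fs = require('fs')

// hypothetical caller, for illustration only
const uploadFile = async (svc, getToken) => {

  const filePath = '/tmp/model.rvt'

  const file = {
    path: filePath,
    size: fs.statSync(filePath).size
  }

  // resolves with upload metadata; completion is reported via onComplete
  const res = await svc.uploadObjectChunked(
    getToken, 'my-bucket', 'model.rvt', file, {
      chunkSize: 5 * 1024 * 1024,
      concurrentUploads: 3,
      onProgress: ({progress, chunkIndex}) =>
        console.log(`chunk ${chunkIndex}: ${progress}%`),
      onComplete: () => console.log('upload complete'),
      onError: (err) => console.error(err)
    })

  console.log(res) // { fileSize, bucketKey, objectKey, nbChunks }
}
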
cozybim commented Aug 14, 2022

I followed your instructions and modified it a little on my own. It runs, but it can't upload all of a large file (say 300 or 600 MB). This is my code. Please help:

async uploadObjectChunked(token, bucketKey, objectKey, file, opts, client) {
    try {
      const chunkSize = opts.chunkSize || 5 * 1024 * 1024

      const nbChunks = Math.ceil(file.size / chunkSize)

      const chunksMap = Array.from({
          length: nbChunks
        }, (e, i) => i)

      // generates uniques session ID
      const sessionId = uuidv4()
        
      // prepare the upload tasks
      const uploadTasks = chunksMap.map((chunkIdx) => {

        const start = chunkIdx * chunkSize

        const end = Math.min(
          file.size, (chunkIdx + 1) * chunkSize) - 1

        const range = `bytes ${start}-${end}/${file.size}`

        const length = end - start + 1

        const readStream =
          fs.createReadStream(file.path, {
            start, end
          })

        const run = async () => {
          const result = new ObjectsApi().uploadChunk(
            bucketKey, objectKey,
            length, range, sessionId,
            readStream, {},
            client, token);

          return result;
        }

        return {
          chunkIndex: chunkIdx,
          run
        }
      })

      let progress = 0

      const runTask = async (task) => {
        await task.run();
        progress += 100.0 / nbChunks

        opts.onProgress && opts.onProgress ({
          progress: Math.round(progress * 100) / 100,
          chunkIndex: task.chunkIndex
        })
      };

      const runMultiTasks = async (coll, limit, asyncFunc ) => {
        let ret = [];
        const splitArr = coll.reduce(
          (acc, item, i) => (i % limit)
          ? acc 
          : [...acc, coll.slice(i, i + limit)], []
        )
        console.log(splitArr);
        for (let i = 0; i < splitArr.length; i++) {
          ret[i] = await Promise.all(splitArr[i].map(async task => await asyncFunc(task)));
          console.log(`Sending chunks ${i * limit}-${i * limit + splitArr[i].length}/${coll.length}`);
        }
        
        return ret;
      }

      await runMultiTasks(uploadTasks, opts.concurrentUploads || 5, runTask);
      opts.onComplete && opts.onComplete();

      return {
        fileSize: file.size,
        bucketKey,
        objectKey,
        nbChunks
      }
    } catch (error) {
      opts.onError && opts.onError(error)
    }
  }

  async uploadLargeObject(bucketKey, file, originalname, forgeConfig) {
    try { 
      const opts = {
        chunkSize: 5 * 1024 * 1024, //5MB chunks
        concurrentUploads: 3,
      }
      const token = await getInternalToken(forgeConfig);
      const client = getClient(forgeConfig.scopes.internal, forgeConfig);
  
      const response = await this.uploadObjectChunked(token, bucketKey, originalname, file, opts, client);
      return response;
    } catch (error) {
      console.log(error)
    }

  }

leefsmp (Author) commented Aug 16, 2022

I suggest you contact Autodesk support directly, as I'm no longer working as an advocate for that company. In addition, this upload API is deprecated and will be removed soon: https://forge.autodesk.com/blog/data-management-oss-object-storage-service-migrating-direct-s3-approach
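
For anyone landing here later, a rough sketch of the direct-to-S3 flow that blog post describes, assuming the documented signeds3upload endpoints and using axios; part counts, URL expiry and error handling are simplified and not verified against the latest API:

const axios = require('axios')
const fs = require('fs')

// minimal sketch of the direct-to-S3 upload flow (assumptions noted above)
const uploadDirectToS3 = async (token, bucketKey, objectKey, filePath) => {

  const size = fs.statSync(filePath).size
  const chunkSize = 5 * 1024 * 1024
  const parts = Math.ceil(size / chunkSize)

  const baseUrl =
    `https://developer.api.autodesk.com/oss/v2/buckets/` +
    `${bucketKey}/objects/${encodeURIComponent(objectKey)}/signeds3upload`

  // 1. request signed upload URLs, one per part
  const { data } = await axios.get(`${baseUrl}?parts=${parts}`, {
    headers: { Authorization: `Bearer ${token}` }
  })

  // 2. PUT each chunk to its signed URL
  for (let i = 0; i < parts; i++) {
    const start = i * chunkSize
    const end = Math.min(size, start + chunkSize) - 1
    const chunk = fs.createReadStream(filePath, { start, end })
    await axios.put(data.urls[i], chunk, {
      headers: { 'Content-Length': end - start + 1 }
    })
  }

  // 3. complete the upload with the uploadKey returned in step 1
  const res = await axios.post(baseUrl, { uploadKey: data.uploadKey }, {
    headers: { Authorization: `Bearer ${token}` }
  })

  return res.data
}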
