file.js

import SparkMD5 from 'spark-md5'

const chunkSize = 10 * 1024 * 1024 // 10 MB per chunk

/**
 * Compute a file's MD5 by reading it chunk by chunk,
 * so large files never have to be loaded into memory at once.
 */
export const getFileMD5 = (file, callback) => {
  const blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice
  const fileReader = new FileReader()
  // Total number of chunks to read
  const totalChunks = Math.ceil(file.size / chunkSize)
  let currentChunk = 0
  const spark = new SparkMD5.ArrayBuffer()
  loadNext()
  fileReader.onload = function (e) {
    try {
      spark.append(e.target.result)
    } catch (error) {
      console.warn('Failed to append chunk ' + currentChunk + ' to the MD5 hash')
    }
    // Advance before the bounds check, so exactly totalChunks reads happen
    // (incrementing inside the branch triggered one extra, empty read)
    currentChunk++
    if (currentChunk < totalChunks) {
      loadNext()
    } else {
      // All chunks processed: finalize and hand back the hex digest
      callback(spark.end())
    }
  }
  fileReader.onerror = function () {
    console.warn('Failed to compute MD5: file read error')
  }
  function loadNext() {
    const start = currentChunk * chunkSize
    const end = Math.min(start + chunkSize, file.size)
    // Slice the raw File object and read the chunk as an ArrayBuffer
    fileReader.readAsArrayBuffer(blobSlice.call(file, start, end))
  }
}
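
/*
 * Usage sketch (not part of the original module; the selector and the
 * callback body are illustrative assumptions) — hashing the first file
 * picked from an <input type="file">:
 *
 *   const input = document.querySelector('input[type="file"]')
 *   input.addEventListener('change', () => {
 *     const file = input.files[0]
 *     if (!file) return
 *     getFileMD5(file, (md5) => {
 *       console.log('file MD5:', md5) // hex digest of the whole file
 *     })
 *   })
 */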
/**
 * Split a file into fixed-size chunks for upload.
 */
export const createFileChunk = (file) => {
  const fileChunkList = []
  let count = 0
  while (count < file.size) {
    fileChunkList.push({
      file: file.slice(count, count + chunkSize),
    })
    count += chunkSize
  }
  return fileChunkList
}
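
/*
 * Usage sketch (assumptions: the '/upload' endpoint, the form field names,
 * and the sequential loop are illustrative, not defined by this module) —
 * uploading the chunks from createFileChunk one by one, tagged with the
 * whole file's MD5 so the server can deduplicate and reassemble them:
 *
 *   const chunks = createFileChunk(file)
 *   getFileMD5(file, async (md5) => {
 *     for (let i = 0; i < chunks.length; i++) {
 *       const formData = new FormData()
 *       formData.append('chunk', chunks[i].file)
 *       formData.append('index', String(i))
 *       formData.append('fileMd5', md5)
 *       await fetch('/upload', { method: 'POST', body: formData })
 *     }
 *   })
 */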