```js
import SparkMD5 from 'spark-md5'

const chunkSize = 10 * 1024 * 1024 // 10 MB per chunk

/**
 * Compute a file's MD5 by reading it in chunks.
 */
export const getFileMD5 = (file, callback) => {
  const blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice
  const fileReader = new FileReader()
  // Number of chunks the file will be read in
  const totalChunks = Math.ceil(file.size / chunkSize)
  let currentChunk = 0
  const spark = new SparkMD5.ArrayBuffer()

  fileReader.onload = function (e) {
    try {
      spark.append(e.target.result)
    } catch (error) {
      console.warn('Failed to hash chunk ' + currentChunk + ':', error)
    }
    // Advance before comparing, so the last chunk does not trigger an extra empty read
    currentChunk++
    if (currentChunk < totalChunks) {
      loadNext()
    } else {
      // All chunks hashed: finalize and hand the hex digest to the caller
      callback(spark.end())
    }
  }

  fileReader.onerror = function () {
    console.warn('MD5 computation failed: error reading the file')
  }

  function loadNext() {
    const start = currentChunk * chunkSize
    const end = Math.min(start + chunkSize, file.size)
    // Slice the next chunk off the file and read it as an ArrayBuffer
    fileReader.readAsArrayBuffer(blobSlice.call(file, start, end))
  }

  loadNext()
}
```
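A minimal usage sketch for `getFileMD5`, assuming a file input element with id `fileInput` (the element id and the log message are illustrative, not part of the original code):

```js
// Hypothetical usage: hash whatever file the user picks
document.querySelector('#fileInput').addEventListener('change', (event) => {
  const file = event.target.files[0]
  if (!file) return
  getFileMD5(file, (md5) => {
    // md5 is the hex digest of the entire file, often used as an upload identifier
    console.log('file MD5:', md5)
  })
})
```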
```js
/**
 * Split a file into fixed-size chunks for upload.
 */
export const createFileChunk = (file) => {
  const fileChunkList = []
  let count = 0
  while (count < file.size) {
    fileChunkList.push({
      file: file.slice(count, count + chunkSize),
    })
    count += chunkSize
  }
  return fileChunkList
}
```
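The two helpers are typically combined for a chunked upload: hash the file once, then send each chunk tagged with the file-level MD5 and its index. A sketch under that assumption; `uploadChunk` is a hypothetical server call, not defined above:

```js
// Hypothetical wiring of both helpers; uploadChunk(md5, index, blob) is assumed
const uploadFile = (file) => {
  getFileMD5(file, async (md5) => {
    const chunks = createFileChunk(file)
    for (let i = 0; i < chunks.length; i++) {
      // Sequential upload keeps the example simple; real code may parallelize
      await uploadChunk(md5, i, chunks[i].file)
    }
  })
}
```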