Principles:
## Chunked upload: split a file into multiple small chunks of a fixed size, upload each chunk separately, and once every chunk is uploaded, concatenate all chunks in order into a single file.
## Resumable upload: the server records the chunk number and byte offset where the transfer was interrupted; before resuming, it returns these two values to the frontend, which then skips the data that was already uploaded and continues from there.
## Fast upload: building on resumable upload, chunks that have already been uploaded are skipped entirely rather than re-uploaded.
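To make the chunk arithmetic concrete, here is a minimal illustrative sketch (the class name is hypothetical; the 30 MB chunk size mirrors the frontend code later in this article). Note the edge case where the file size is an exact multiple of the chunk size:

// Minimal sketch of the chunk arithmetic used throughout this article (illustrative only).
public class ChunkMath {
    // mirrors the 30 MB chunk size used by the frontend code below
    static final long CHUNK_SIZE = 30L * 1024 * 1024;

    public static void main(String[] args) {
        long fileSize = 100L * 1024 * 1024; // e.g. a 100 MB file
        long totalChunks = (fileSize + CHUNK_SIZE - 1) / CHUNK_SIZE; // ceiling division -> 4
        // the last chunk is the remainder, unless the file size divides evenly
        long lastChunkSize = fileSize % CHUNK_SIZE == 0 ? CHUNK_SIZE : fileSize % CHUNK_SIZE;
        System.out.println(totalChunks + " chunks, last chunk is " + lastChunkSize + " bytes");
    }
}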
Implementation logic:
## Chunked upload:
① The frontend registers the upload configuration: the size of each chunk, the total chunk count, the total file size, the file name, the file's MD5 value, and so on.
② Each chunk is uploaded along with its chunk number, byte offset, and related metadata.
③ Once every chunk is uploaded, the frontend sends a merge request; the backend concatenates all chunks into a new file in chunk-number order, then deletes the chunk files.
## Resumable upload:
① When an upload is interrupted, the backend records the chunk number and offset at the point of interruption.
② Before resuming, the frontend asks the backend for the resume information, chiefly the chunk number and offset to continue from.
③ The frontend resumes the upload from that position.
## Fast upload:
① Using the backend's records, check whether a chunk has already been uploaded; if so, immediately report it as complete so the frontend moves on to the next chunk. (A minimal server-side sketch of this check follows.)
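As a minimal sketch of that server-side check (illustrative only; the full implementation appears in UploadServiceImpl below, and the class name here is hypothetical), a chunk counts as already uploaded when its file exists on disk with the expected size:

import java.io.File;

// Minimal sketch: a chunk counts as already uploaded when its file
// exists under <basePath>/<fileMd5>/ and has the expected size.
public class FastUploadCheck {
    public static boolean chunkAlreadyUploaded(String basePath, String fileMd5,
                                               long chunkNum, long expectedSize) {
        File chunk = new File(basePath + File.separator + fileMd5
                + File.separator + chunkNum);
        return chunk.exists() && chunk.length() == expectedSize;
    }
}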
Code snippets:
① Backend code
package cn.mx.modules.upload.controller;
import cn.mx.common.ResultModel;
import cn.mx.common.security.log.Log;
import cn.mx.common.security.log.LogType;
import cn.mx.modules.upload.bo.ChunkUploadBo;
import cn.mx.modules.upload.bo.FileSliceConfBo;
import cn.mx.modules.upload.service.UploadService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import javax.annotation.Resource;
import javax.validation.Valid;
/**
* @author yxp
* @date 2024/2/20
* @desc File upload - chunked upload and resumable upload
**/
@RestController
@RequestMapping("/upload/fileUpload")
@Slf4j
@Log(title = "File upload")
public class FileUploadController {
@Resource
private UploadService uploadService;
@PostMapping("/chunk/register")
@Log(logType = LogType.OTHER, content = "Register file upload info")
public ResultModel registerFileSliceInfo(FileSliceConfBo bo) {
return uploadService.registerFileSliceInfo(bo);
}
@PostMapping("/chunk/upload")
@Log(logType = LogType.OTHER, content = "Upload a file chunk")
public ResultModel uploadChunk(@Valid ChunkUploadBo bo) {
return uploadService.uploadChunk(bo);
}
@PostMapping("/chunk/merge")
@Log(logType = LogType.OTHER, content = "Merge file chunks")
public ResultModel mergeFile(FileSliceConfBo bo) {
return uploadService.mergeFile(bo);
}
@PostMapping("/chunk/delete")
@Log(logType = LogType.OTHER, content = "Delete file chunk info")
public ResultModel deleteFileInfo(FileSliceConfBo bo) {
return uploadService.deleteFileInfo(bo);
}
}
package cn.mx.modules.upload.service;
import cn.mx.common.ResultModel;
import cn.mx.modules.upload.bo.ChunkUploadBo;
import cn.mx.modules.upload.bo.FileSliceConfBo;
/**
* @author yxp
* @date 2024/2/7
* @desc Methods for chunked upload and resumable upload
**/
public interface UploadService {
/**
* Register file upload info; if a record already exists, return resume progress info
*
* @param bo
* @return
*/
ResultModel registerFileSliceInfo(FileSliceConfBo bo);
/**
* Upload one file chunk
*
* @param bo
* @return
*/
ResultModel uploadChunk(ChunkUploadBo bo);
/**
* Merge the file chunks, then delete the temporary chunk files and related data
*
* @param bo
* @return
*/
ResultModel mergeFile(FileSliceConfBo bo);
/**
* Delete file data from both the database and the file server
*
* @param bo
* @return
*/
ResultModel deleteFileInfo(FileSliceConfBo bo);
}
package cn.mx.modules.upload.service.impl;
import cn.mx.common.ResultModel;
import cn.mx.modules.upload.bo.ChunkUploadBo;
import cn.mx.modules.upload.bo.FileSliceConfBo;
import cn.mx.modules.upload.entity.FileUpload;
import cn.mx.modules.upload.mapper.FileUploadMapper;
import cn.mx.modules.upload.service.UploadService;
import cn.hutool.core.io.FileUtil;
import com.baomidou.mybatisplus.mapper.EntityWrapper;
import com.baomidou.mybatisplus.mapper.Wrapper;
import org.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
import java.util.ArrayList;
import java.util.List;
/**
* @author yxp
* @date 2024/2/8
* @desc ***
**/
@Service
public class UploadServiceImpl implements UploadService {
private final static Logger log = LoggerFactory.getLogger(UploadServiceImpl.class);
@Value("${upload.path}")
private String BASE_PATH;
@Resource
private FileUploadMapper fileUploadMapper;
@Override
public ResultModel registerFileSliceInfo(FileSliceConfBo bo) {
JSONObject resJson = new JSONObject();
File dir = new File(BASE_PATH + File.separator + bo.getFileMd5());
if (!dir.exists()) {
boolean dirCreate = dir.mkdirs();
if (!dirCreate) {
return ResultModel.fail("创建分片文件目录失败,目录为:" + BASE_PATH + File.separator + bo.getFileMd5());
}
}
Wrapper<FileUpload> confWrapper = new EntityWrapper<>();
confWrapper.eq("md5", bo.getFileMd5());
List<FileUpload> uploads = fileUploadMapper.selectList(confWrapper);
if (!uploads.isEmpty()) {
FileUpload upload = uploads.get(0);
String fileName = upload.getName();
String fileMd5 = upload.getMd5();
if (!bo.getFileName().equals(fileName) || !bo.getFileMd5().equals(fileMd5)) {
return ResultModel.fail("文件被篡改,不能续传,请先删除原有数据再重新上传");
}
boolean finish = upload.getFinish();
if (finish) {
resJson.put("fileName", upload.getName());
resJson.put("path", upload.getPath());
resJson.put("finish", finish);
return ResultModel.success(resJson.toString());
} else {
resJson.put("chunkNum", upload.getChunkNum());
resJson.put("chunkOffset", upload.getChunkOffset());
resJson.put("finish", finish);
return ResultModel.success(resJson.toString());
}
} else {
JSONObject confJson = new JSONObject();
confJson.put("fileName", bo.getFileName());
confJson.put("fileMd5", bo.getFileMd5());
confJson.put("chunkCount", bo.getTotalChunks());
confJson.put("chunkSize", bo.getChunkSize());
confJson.put("chunkNum", 1);
confJson.put("chunkOffset", 0);
confJson.put("finish", false);
// Create the upload registration record
FileUpload fileUpload = new FileUpload();
fileUpload.setName(bo.getFileName());
fileUpload.setTotalSize(bo.getFileSize());
fileUpload.setMd5(bo.getFileMd5());
fileUpload.setPath(BASE_PATH + File.separator + bo.getFileMd5() + File.separator + bo.getFileName());
fileUpload.setChunkCount(bo.getTotalChunks());
fileUpload.setChunkSize(bo.getChunkSize());
fileUpload.setChunkNum(1L);
fileUpload.setChunkOffset(0L);
fileUpload.setFinish(false);
fileUploadMapper.insert(fileUpload);
return ResultModel.success(confJson.toString());
}
}
@Override
public ResultModel uploadChunk(ChunkUploadBo bo) {
// Fast upload: if the chunk already exists with its full size, skip it
File file = new File(BASE_PATH + File.separator + bo.getFileMd5() + File.separator + bo.getChunkNum());
if (file.exists() && file.length() == bo.getChunkSize()) {
return ResultModel.success("Chunk " + bo.getChunkNum() + " already uploaded");
}
// The last chunk may already be uploaded while the chunks are not merged yet.
// If the file size divides evenly by the chunk size, the last chunk is a full chunk.
long lastChunkSize = bo.getFileSize() % bo.getChunkSize();
if (lastChunkSize == 0) {
lastChunkSize = bo.getChunkSize();
}
if (file.exists() && bo.isLastChunk() && file.length() == lastChunkSize) {
return ResultModel.success("All chunks are uploaded; please send the merge request");
}
// Load the file's registration record
Wrapper<FileUpload> fileWrapper = new EntityWrapper<>();
fileWrapper.eq("md5", bo.getFileMd5());
List<FileUpload> uploads = fileUploadMapper.selectList(fileWrapper);
if (uploads.isEmpty()) {
return ResultModel.fail("没有获取到文件上传注册信息,请重新上传");
}
FileUpload fileUpload = uploads.get(0);
fileUpload.setChunkNum(bo.getChunkNum());
// Save the chunk
long chunkOffset = 0;
try (InputStream ins = bo.getChunkFile().getInputStream()) {
byte[] readBytes = new byte[1024];
int n;
if (bo.getOffset() != null && bo.getOffset() > 0) {
// Resumable upload: the client re-sends the whole chunk, so skip the
// bytes that are already on disk, then append at the recorded offset.
// Do not open a FileOutputStream here: it would truncate the partial chunk.
long toSkip = bo.getOffset();
while (toSkip > 0) {
long skipped = ins.skip(toSkip);
if (skipped <= 0) {
break;
}
toSkip -= skipped;
}
try (RandomAccessFile accessFile = new RandomAccessFile(file, "rw")) {
accessFile.seek(bo.getOffset());
chunkOffset = bo.getOffset();
while ((n = ins.read(readBytes)) != -1) {
// write only the bytes actually read, not the whole buffer
accessFile.write(readBytes, 0, n);
chunkOffset += n;
}
}
} else {
try (FileOutputStream ous = new FileOutputStream(file)) {
while ((n = ins.read(readBytes)) != -1) {
ous.write(readBytes, 0, n);
chunkOffset += n;
}
ous.flush();
}
}
} catch (IOException e) {
// Record how far we got so the client can resume from this offset
fileUpload.setChunkOffset(chunkOffset);
fileUploadMapper.updateById(fileUpload);
return ResultModel.fail("Chunk " + bo.getChunkNum() + " upload failed, please retry");
}
fileUpload.setChunkOffset(chunkOffset);
fileUploadMapper.updateById(fileUpload);
return ResultModel.success("Chunk " + bo.getChunkNum() + " uploaded successfully!");
}
@Override
public ResultModel mergeFile(FileSliceConfBo bo) {
// Load the registration record
Wrapper<FileUpload> wrapper = new EntityWrapper<>();
wrapper.eq("md5", bo.getFileMd5());
List<FileUpload> uploads = fileUploadMapper.selectList(wrapper);
if (uploads.isEmpty()) {
return ResultModel.fail("Invalid chunk merge request");
}
FileUpload fileUpload = uploads.get(0);
if (fileUpload.getFinish()) {
return ResultModel.fail("This file's chunks have already been merged");
}
// Verify every chunk exists before merging; the list is reused afterwards
// to delete the temporary chunk files
List<File> chunkFileList = new ArrayList<>();
for (int i = 1; i <= fileUpload.getChunkCount(); i++) {
File chunkFile = new File(BASE_PATH + File.separator + bo.getFileMd5() + File.separator + i);
if (!chunkFile.exists()) {
// Point the record at the missing chunk so fast upload can resume it
fileUpload.setChunkNum((long) i);
fileUpload.setChunkOffset(0L);
fileUploadMapper.updateById(fileUpload);
return ResultModel.fail("The chunk data for this file is incomplete and cannot be merged; please upload again");
}
chunkFileList.add(chunkFile);
}
// Merge the chunks in chunk-number order
File file = new File(BASE_PATH + File.separator + bo.getFileMd5() + File.separator + fileUpload.getName());
try (FileOutputStream ous = new FileOutputStream(file)) {
byte[] bytes = new byte[1024];
for (File chunkFile : chunkFileList) {
try (FileInputStream chunkIns = new FileInputStream(chunkFile)) {
int n;
while ((n = chunkIns.read(bytes)) != -1) {
// write only the bytes actually read, not the whole buffer
ous.write(bytes, 0, n);
}
}
}
ous.flush();
} catch (IOException e) {
log.error("Failed to merge chunk files, path: {}, cause: {}", BASE_PATH + File.separator + bo.getFileMd5(), e.getMessage());
return ResultModel.fail("Failed to merge chunk files: " + e.getMessage());
}
// All merged: update the database and delete the temporary chunk files
fileUpload.setPath(file.getAbsolutePath());
fileUpload.setFinish(true);
fileUploadMapper.updateById(fileUpload);
for (File chunkFile : chunkFileList) {
chunkFile.delete();
}
return ResultModel.success("'" + file.getName() + "' merged successfully");
}
@Override
public ResultModel deleteFileInfo(FileSliceConfBo bo) {
// Look up the file's records in the database
Wrapper<FileUpload> queWrapper = new EntityWrapper<>();
queWrapper.eq("name", bo.getFileName());
queWrapper.eq("total_size", bo.getFileSize());
List<FileUpload> uploads = fileUploadMapper.selectList(queWrapper);
if (uploads.isEmpty()) {
log.error("删除已上传文件没有找到记录,文件名为:{},文件大小为:{}", bo.getFileName(), bo.getFileSize());
return ResultModel.success();
}
for (FileUpload upload : uploads) {
// Delete the data on the file server
File dir = new File(BASE_PATH + File.separator + upload.getMd5());
FileUtil.del(dir);
// Delete the database record
Integer delete = fileUploadMapper.deleteById(upload.getId());
if (delete < 1) {
return ResultModel.fail("没有该文件的数据,无需删除");
}
}
return ResultModel.success();
}
}
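One gap worth noting: fileMd5 is registered to detect tampering, but mergeFile above never verifies the merged result against it. A minimal sketch of such a check using Hutool's DigestUtil (Hutool is already on the classpath here, since FileUtil is imported from it; the class and method names below are hypothetical) could be hooked in just before finish is set to true:

import cn.hutool.crypto.digest.DigestUtil;
import java.io.File;

// Sketch: after merging, recompute the file's MD5 and compare it with the
// fileMd5 recorded at registration before marking the upload as finished.
public class MergeVerifier {
    public static boolean verifyMergedFile(File mergedFile, String expectedMd5) {
        String actualMd5 = DigestUtil.md5Hex(mergedFile);
        return actualMd5.equalsIgnoreCase(expectedMd5);
    }
}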
package cn.mx.modules.upload.bo;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
/**
* @author yxp
* @date 2024/2/7
* @desc File chunk upload configuration
**/
@Data
@ApiModel("文件上传分片配置类")
public class FileSliceConfBo {
@ApiModelProperty("文件名-合并块时使用")
private String fileName;
@ApiModelProperty("文件MD5值,防止篡改")
private String fileMd5;
@ApiModelProperty("文件总大小")
private Long fileSize;
@ApiModelProperty("每个块的大小")
private Long chunkSize;
@ApiModelProperty("块的总数量")
private Long totalChunks;
}
package cn.mx.modules.upload.bo;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import org.springframework.web.multipart.MultipartFile;
import javax.validation.constraints.NotNull;
/**
* @author yxp
* @date 2024/2/7
* @desc Chunk upload request
**/
@Data
@ApiModel("文件块上传请求类")
public class ChunkUploadBo {
@ApiModelProperty("块文件")
@NotNull(message = "上传文件块不能为空")
private MultipartFile chunkFile;
@ApiModelProperty("块编号")
private Long chunkNum;
@ApiModelProperty("块大小")
private Long chunkSize;
@ApiModelProperty("文件块偏移量-主要用于断点续传")
private Long offset;
@ApiModelProperty("文件总大小")
private Long fileSize;
@ApiModelProperty("整个文件的md5值,防止传输篡改")
private String fileMd5;
@ApiModelProperty("是否是文件的最后一块")
private boolean isLastChunk;
}
② Database design
CREATE TABLE `file_upload` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(255) NOT NULL COMMENT 'file name',
`total_size` bigint(20) NOT NULL COMMENT 'total file size',
`md5` varchar(255) NOT NULL COMMENT 'file MD5',
`path` varchar(255) NOT NULL COMMENT 'file path',
`chunk_count` int(10) NOT NULL DEFAULT '1' COMMENT 'number of chunks',
`chunk_size` int(10) DEFAULT NULL COMMENT 'chunk size',
`chunk_num` int(10) NOT NULL DEFAULT '1' COMMENT 'current chunk number',
`chunk_offset` int(10) DEFAULT NULL COMMENT 'chunk offset',
`finish` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'whether the upload is finished (i.e. the chunks have been merged)',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='file upload table';
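The FileUpload entity and FileUploadMapper referenced by the service are not shown in the original post. A minimal sketch of the entity matching this table, assuming MyBatis-Plus 2.x (the version that provides EntityWrapper) with camelCase-to-underscore column mapping enabled:

package cn.mx.modules.upload.entity;

import com.baomidou.mybatisplus.annotations.TableId;
import com.baomidou.mybatisplus.annotations.TableName;
import com.baomidou.mybatisplus.enums.IdType;
import lombok.Data;

/**
 * Sketch of the entity backing the file_upload table (hypothetical; the
 * original post does not include it). The mapper would simply be
 * "public interface FileUploadMapper extends BaseMapper<FileUpload> {}".
 **/
@Data
@TableName("file_upload")
public class FileUpload {
    @TableId(type = IdType.AUTO)
    private Integer id;
    private String name; // file name
    private Long totalSize; // total file size
    private String md5; // file MD5
    private String path; // file path
    private Long chunkCount; // number of chunks
    private Long chunkSize; // chunk size
    private Long chunkNum; // current chunk number
    private Long chunkOffset; // chunk offset
    private Boolean finish; // whether the chunks have been merged
}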
③ Frontend code (partly adapted from examples found online)
<template>
<el-upload
:http-request="chunkedUpload"
:action="uploadUrl"
:data="uploadData"
:on-error="onError"
:before-remove="beforeRemove"
name="file">
<el-button size="small" type="primary">Click to upload</el-button>
</el-upload>
</template>
<script>
import chunkedUpload from '@dataeye/api/dataeye/chunkedUpload'
import request from '@dataeye/api/common/request'
import {Message} from "element-ui";
export default {
name: "FileChunkUpload",
data() {
return {
uploadData: {
// extra parameters to send along with each upload request
},
uploadUrl: '/dataeye/fileUpload/chunk/upload', // upload endpoint
chunkedUpload: chunkedUpload // custom chunked-upload handler imported above
}
},
methods: {
onError(err, file, fileList) {
console.log(err)
this.$alert('File upload failed, please retry', 'Error', {
confirmButtonText: 'OK'
})
},
beforeRemove(file) {
// call the delete endpoint
const data = {
fileName: file.name, // file name
fileSize: file.size // file size
}
request({
method: 'post',
url: '/dataeye/fileUpload/chunk/delete',
data: data
}).then(res => {
if (res.data.code !== '200') {
Message({
type: 'error',
message: res.data.msg
})
} else {
Message({
type: 'success',
message: 'Deleted successfully'
})
}
})
}
}
}
</script>
<style scoped>
</style>
import SparkMD5 from 'spark-md5'
// use the project's wrapped request instead of raw axios
// import axios from 'axios'
import request from '@dataeye/api/common/request'
import store from '@comm/store'
import {Message} from 'element-ui'
import Cookies from "js-cookie";
// Build an error object from a failed upload response
function getError(action, option, xhr) {
let msg
if (xhr.response) {
msg = `${xhr.response.msg || xhr.response}`
} else if (xhr.responseText) {
msg = `${xhr.responseText}`
} else {
msg = `fail to post ${action} ${xhr.status}`
}
const err = new Error(msg)
err.status = xhr.status
err.method = 'post'
err.url = action
return err
}
// Parse the message the server returned once the merge has completed
function getBody(xhr) {
const text = xhr.msg || xhr.data
if (!text) {
return text
}
try {
return JSON.parse(text)
} catch (e) {
return text
}
}
// Custom request for chunked upload; it overrides el-upload's default upload behavior
export default function upload(option) {
if (typeof XMLHttpRequest === 'undefined') {
return
}
const spark = new SparkMD5.ArrayBuffer() // incremental MD5 over ArrayBuffers
const fileReader = new FileReader() // used to read the file's bytes
const action = option.action // upload endpoint
const chunkSize = 1024 * 1024 * 30 // size of a single chunk (30 MB)
let chunkContinuesNum = 0 // chunk number to resume from, set after registration
let chunkOffset = 0 // byte offset within the chunk being resumed
let md5 = '' // the file's unique fingerprint
const optionFile = option.file // the file to be chunked
let fileChunkedList = [] // chunk blobs, later remapped to FormData objects
const percentage = [] // per-chunk upload progress, one entry per chunk
// no file has been selected yet on the first invocation, so just return
if (option.file === undefined) {
return
}
// Slice the file into chunks and collect them in fileChunkedList
for (let i = 0; i < optionFile.size; i = i + chunkSize) {
fileChunkedList.push(optionFile.slice(i, Math.min((i + chunkSize), optionFile.size)))
}
// Read the chunks sequentially; once the last one has been appended,
// finalize the incremental MD5 as the file's unique fingerprint
let readIndex = 0
fileReader.onload = async (e) => {
spark.append(e.target.result)
readIndex++
if (readIndex < fileChunkedList.length) {
fileReader.readAsArrayBuffer(fileChunkedList[readIndex])
return
}
md5 = spark.end()
console.log('file md5 --------', md5)
// Turn fileChunkedList into FormData objects carrying the fields the backend needs
fileChunkedList = fileChunkedList.map((item, index) => {
const formData = new FormData()
if (option.data) {
// merge in any extra data passed from the component; note the if-block
// must close here, otherwise the required fields below would only be
// appended when extra data is present
Object.keys(option.data).forEach(key => {
formData.append(key, option.data[key])
})
}
// Append whichever fields the backend expects; extra parameters can be added the same way
formData.append('chunkFile', item, option.file.name) // the chunk blob
formData.append('chunkNum', index + 1) // current chunk number
formData.append('chunkSize', chunkSize) // size of a single chunk
formData.append('fileSize', optionFile.size) // total file size
formData.append('fileMd5', md5) // file fingerprint
formData.append('totalChunks', fileChunkedList.length) // total chunk count
// Lombok exposes the backend's boolean isLastChunk field as "lastChunk"
formData.append('lastChunk', index + 1 === fileChunkedList.length)
// the resume offset is appended at send time in sendRequest, because
// chunkContinuesNum is only known after the register request returns
return {formData: formData, index: index, size: item.size}
})
// Update the overall progress bar; each entry in percentage is one chunk's uploaded bytes
const updatePercentage = (e) => {
let loaded = 0 // bytes uploaded so far
percentage.forEach(item => {
loaded += item
})
e.percent = loaded / optionFile.size * 100
option.onProgress(e)
}
// Queue-based upload of the chunks; limit is the number of concurrent requests
function sendRequest(chunks, limit = 1) {
return new Promise((resolve, reject) => {
const len = chunks.length
let counter = 0
let isStop = false
const start = async () => {
if (isStop) {
return
}
const item = chunks.shift()
if (item) {
// Resumable/fast upload: skip chunks before the resume point
if (item.index + 1 >= chunkContinuesNum) {
// append the resume offset only to the chunk being resumed
if (item.index + 1 === chunkContinuesNum && chunkOffset > 0) {
item.formData.set('offset', chunkOffset)
}
// upload this chunk
const chunkUpload = await request({
method: 'post',
url: action,
data: item.formData
})
if (chunkUpload.data.code !== '200') {
Message({
type: 'error',
message: chunkUpload.data.msg
})
isStop = true
reject(chunkUpload)
return
}
}
if (counter === len - 1) {
// the last chunk has finished
resolve()
} else {
counter++
// update the progress bar with this chunk's actual size
percentage[item.index] = item.size
updatePercentage(e)
start()
}
}
}
// stagger the initial requests slightly
while (limit > 0) {
setTimeout(() => {
start()
}, Math.random() * 1000)
limit -= 1
}
})
}
try {
// First register the upload, or fetch existing resume info
const registerData = {
fileName: option.file.name, // file name, used when merging
fileMd5: md5, // file MD5, to detect tampering
fileSize: optionFile.size, // total file size
chunkSize: chunkSize, // size of each chunk
totalChunks: fileChunkedList.length // total chunk count
}
// send the register request to the backend
const registerResInfo = await request({
method: 'post',
url: '/dataeye/fileUpload/chunk/register',
data: registerData
})
if (registerResInfo.data.code !== '200') {
Message({
type: 'error',
message: registerResInfo.data.msg
})
return
} else {
let resJson = JSON.parse(registerResInfo.data.data)
if (resJson.finish) {
Message({
type: 'success',
message: 'Already uploaded, no need to upload again!'
})
return
}
}
// Resumable upload: set the chunk number and offset to resume from
let registerResData = JSON.parse(registerResInfo.data.data)
chunkContinuesNum = registerResData.chunkNum
chunkOffset = registerResData.chunkOffset
// Run the upload queue and wait for every chunk to finish
await sendRequest(fileChunkedList, 1)
// Adjust these parameters to whatever your backend actually needs
const data = {
fileMd5: md5, // file MD5, to detect tampering
chunkSize: chunkSize, // size of each chunk
totalChunks: registerData.totalChunks // total chunk count
}
// send the merge request to the backend
const fileInfo = await request({
method: 'post',
url: '/dataeye/fileUpload/chunk/merge',
data: data
})
if (fileInfo.data.code === '200') {
const success = getBody(fileInfo.data)
option.onSuccess(success)
return
}
} catch (error) {
option.onError(error)
}
}
// Kick off the sequential read; the onload handler above drives the rest
if (fileChunkedList.length > 0) {
fileReader.readAsArrayBuffer(fileChunkedList[0])
}
}