commit c5d6bbf020
@@ -3,6 +3,7 @@ package com.qiwenshare.common.config;
import javax.annotation.PostConstruct;
import javax.annotation.Resource;

import com.qiwenshare.common.util.PropertiesUtil;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.env.Environment;

@@ -6,10 +6,14 @@ import org.springframework.cache.annotation.EnableCaching;
import org.springframework.cache.interceptor.KeyGenerator;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.GenericToStringSerializer;
import org.springframework.data.redis.serializer.StringRedisSerializer;

@Configuration
@EnableCaching
public class RedisAutoConfiguration extends CachingConfigurerSupport {
public class RedisConfig extends CachingConfigurerSupport {

@Bean
public KeyGenerator keyGenerator() {
@@ -23,6 +27,26 @@ public class RedisAutoConfiguration extends CachingConfigurerSupport {
return sb.toString();
};
}
/**
 * Configure the serializers used by redisTemplate
 * @param redisConnectionFactory
 * @return
 */
@Bean
public RedisTemplate<Object, Object> redisTemplate(RedisConnectionFactory redisConnectionFactory) {
// 1. Create the RedisTemplate instance
RedisTemplate<Object, Object> template = new RedisTemplate<>();
// 2. Attach the redisConnectionFactory
template.setConnectionFactory(redisConnectionFactory);
// 3. Create the serializer
GenericToStringSerializer genericToStringSerializer = new GenericToStringSerializer(Object.class);
// 6. Serializer and object-mapping settings
// 7. Set the value serializer and the key serializer
template.setValueSerializer(genericToStringSerializer);
template.setKeySerializer(new StringRedisSerializer());
template.afterPropertiesSet();
return template;
}

// @Bean
// public CacheManager cacheManager(RedisConnectionFactory redisConnectionFactory) {
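
With StringRedisSerializer for keys and GenericToStringSerializer for values, entries written through this template land in Redis as plain strings. A minimal usage sketch of that behaviour (the CounterService name and key below are illustrative, not part of this commit):

    import javax.annotation.Resource;
    import org.springframework.data.redis.core.RedisTemplate;
    import org.springframework.stereotype.Component;

    @Component
    public class CounterService {

        @Resource
        private RedisTemplate<Object, Object> redisTemplate;

        public void markUploaded(String identifier, long size) {
            // stored as the string form of the value, because of GenericToStringSerializer
            redisTemplate.opsForValue().set(identifier + "_uploaded_size", size);
        }

        public Long readUploaded(String identifier) {
            Object value = redisTemplate.opsForValue().get(identifier + "_uploaded_size");
            return value == null ? null : Long.parseLong(value.toString());
        }
    }
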
@@ -2,69 +2,13 @@ package com.qiwenshare.common.domain;

import lombok.Data;

@Data
public class AliyunOSS {
private boolean enabled;

private String endpoint;
private String accessKeyId;
private String accessKeySecret;
private String bucketName;
private String objectName;
private String domain;

public boolean isEnabled() {
return enabled;
}

public void setEnabled(boolean enabled) {
this.enabled = enabled;
}

public String getEndpoint() {
return endpoint;
}

public void setEndpoint(String endpoint) {
this.endpoint = endpoint;
}

public String getAccessKeyId() {
return accessKeyId;
}

public void setAccessKeyId(String accessKeyId) {
this.accessKeyId = accessKeyId;
}

public String getAccessKeySecret() {
return accessKeySecret;
}

public void setAccessKeySecret(String accessKeySecret) {
this.accessKeySecret = accessKeySecret;
}

public String getBucketName() {
return bucketName;
}

public void setBucketName(String bucketName) {
this.bucketName = bucketName;
}

public String getObjectName() {
return objectName;
}

public void setObjectName(String objectName) {
this.objectName = objectName;
}

public String getDomain() {
return domain;
}

public void setDomain(String domain) {
this.domain = domain;
}
}
@@ -12,6 +12,6 @@ public class FastDFSDeleter extends Deleter {
private FastFileStorageClient fastFileStorageClient;
@Override
public void delete(DeleteFile deleteFile) {
fastFileStorageClient.deleteFile(deleteFile.getFileUrl());
fastFileStorageClient.deleteFile(deleteFile.getFileUrl().replace("M00", "group1"));
}
}
@@ -20,13 +20,15 @@ public class FastDFSDownloader extends Downloader {
@Override
public void download(HttpServletResponse httpServletResponse, DownloadFile downloadFile) {
String group = downloadFile.getFileUrl().substring(0, downloadFile.getFileUrl().indexOf("/"));
group = "group1";
String path = downloadFile.getFileUrl().substring(downloadFile.getFileUrl().indexOf("/") + 1);
DownloadByteArray downloadByteArray = new DownloadByteArray();
byte[] bytes = fastFileStorageClient.downloadFile(group, path, downloadByteArray);

ServletOutputStream outputStream = null;
try {
outputStream = httpServletResponse.getOutputStream();
byte[] bytes = fastFileStorageClient.downloadFile(group, path, downloadByteArray);
outputStream.write(bytes);
} catch (IOException e) {
e.printStackTrace();
@@ -45,6 +47,7 @@ public class FastDFSDownloader extends Downloader {
@Override
public InputStream getInputStream(DownloadFile downloadFile) {
String group = downloadFile.getFileUrl().substring(0, downloadFile.getFileUrl().indexOf("/"));
group = "group1";
String path = downloadFile.getFileUrl().substring(downloadFile.getFileUrl().indexOf("/") + 1);
DownloadByteArray downloadByteArray = new DownloadByteArray();
byte[] bytes = fastFileStorageClient.downloadFile(group, path, downloadByteArray);
@@ -24,6 +24,4 @@ public class UploadFile {
private long currentChunkSize;

}
@@ -2,14 +2,14 @@ package com.qiwenshare.common.operation.upload.product;

import com.github.tobato.fastdfs.domain.StorePath;
import com.github.tobato.fastdfs.service.AppendFileStorageClient;
import com.qiwenshare.common.operation.upload.domain.UploadFile;
import com.qiwenshare.common.exception.UploadGeneralException;
import com.qiwenshare.common.operation.upload.Uploader;
import com.qiwenshare.common.operation.upload.domain.UploadFile;
import com.qiwenshare.common.util.FileUtil;
import com.qiwenshare.common.util.PathUtil;
import com.qiwenshare.common.util.concurrent.locks.RedisLock;
import com.qiwenshare.common.util.RedisUtil;
import lombok.extern.slf4j.Slf4j;
//import org.apache.commons.fileupload.disk.DiskFileItemFactory;
//import org.apache.commons.fileupload.servlet.ServletFileUpload;
import org.apache.tomcat.util.http.fileupload.disk.DiskFileItemFactory;
import org.apache.tomcat.util.http.fileupload.servlet.ServletFileUpload;
import org.springframework.stereotype.Component;
@@ -19,22 +19,24 @@ import org.springframework.web.multipart.support.StandardMultipartHttpServletReq
import javax.annotation.Resource;
import javax.servlet.http.HttpServletRequest;
import java.io.File;
import java.util.*;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.TimeUnit;

@Component
@Slf4j
public class FastDFSUploader extends Uploader {
public static Object lock = new Object();

@Resource
AppendFileStorageClient defaultAppendFileStorageClient;

private static Map<String, Integer> CURRENT_UPLOAD_CHUNK_NUMBER = new HashMap<>();
private static Map<String, Long> UPLOADED_SIZE = new HashMap<>();
private static Map<String, String> STORE_PATH = new HashMap<>();
private static Map<String, Object> LOCK_MAP = new HashMap<>();
@Resource
RedisLock redisLock;
@Resource
RedisUtil redisUtil;

@Override
public List<UploadFile> upload(HttpServletRequest request, UploadFile uploadFile) {
log.info("开始上传upload");

List<UploadFile> saveUploadFileList = new ArrayList<>();
StandardMultipartHttpServletRequest standardMultipartHttpServletRequest = (StandardMultipartHttpServletRequest) request;
@@ -43,14 +45,11 @@ public class FastDFSUploader extends Uploader {
if (!isMultipart) {
throw new UploadGeneralException("未包含文件上传域");
}
DiskFileItemFactory dff = new DiskFileItemFactory(); // 1. Create the factory

String savePath = getLocalFileSavePath();
dff.setRepository(new File(savePath));

try {
ServletFileUpload sfu = new ServletFileUpload(dff); // 2. Create the file-upload parser
sfu.setSizeMax(this.maxSize * 1024L);
sfu.setHeaderEncoding("utf-8"); // 3. Avoid garbled Chinese file names

Iterator<String> iter = standardMultipartHttpServletRequest.getFileNames();
while (iter.hasNext()) {
saveUploadFileList = doUpload(standardMultipartHttpServletRequest, savePath, iter, uploadFile);
@@ -70,11 +69,7 @@ public class FastDFSUploader extends Uploader {

try {
MultipartFile multipartfile = standardMultipartHttpServletRequest.getFile(iter.next());
synchronized (lock) {
if (LOCK_MAP.get(uploadFile.getIdentifier()) == null) {
LOCK_MAP.put(uploadFile.getIdentifier(), new Object());
}
}

uploadFileChunk(multipartfile, uploadFile);

String timeStampName = getTimeStampName();
@@ -95,8 +90,8 @@ public class FastDFSUploader extends Uploader {
boolean isComplete = checkUploadStatus(uploadFile, confFile);
if (isComplete) {
log.info("分片上传完成");
LOCK_MAP.remove(uploadFile.getIdentifier());
uploadFile.setUrl(STORE_PATH.get(uploadFile.getIdentifier()));
String path = redisUtil.getObject(uploadFile.getIdentifier() + "_storage_path");
uploadFile.setUrl(path);
uploadFile.setSuccess(1);
uploadFile.setMessage("上传成功");
} else {
@@ -116,84 +111,76 @@ public class FastDFSUploader extends Uploader {
}

public void uploadFileChunk(MultipartFile multipartFile, UploadFile uploadFile) {
redisLock.lock(uploadFile.getIdentifier());
try {

synchronized (LOCK_MAP.get(uploadFile.getIdentifier())) {
// Path stored on FastDFS, without the group prefix

log.info("当前文件的Md5:{}", uploadFile.getIdentifier());

log.info("当前块的大小:{}", uploadFile.getCurrentChunkSize());
if (CURRENT_UPLOAD_CHUNK_NUMBER.get(uploadFile.getIdentifier()) == null) {
CURRENT_UPLOAD_CHUNK_NUMBER.put(uploadFile.getIdentifier(), 1);
if (redisUtil.getObject(uploadFile.getIdentifier() + "_current_upload_chunk_number") == null) {
redisUtil.set(uploadFile.getIdentifier() + "_current_upload_chunk_number", 1, 1000 * 60 * 60);
}

while (uploadFile.getChunkNumber() != CURRENT_UPLOAD_CHUNK_NUMBER.get(uploadFile.getIdentifier())) {
try {
LOCK_MAP.get(uploadFile.getIdentifier()).wait();
} catch (InterruptedException e) {
log.error("--------InterruptedException-------");
e.printStackTrace();
String currentUploadChunkNumber = redisUtil.getObject(uploadFile.getIdentifier() + "_current_upload_chunk_number");
if (uploadFile.getChunkNumber() != Integer.parseInt(currentUploadChunkNumber)) {
redisLock.unlock(uploadFile.getIdentifier());
while (redisLock.tryLock(uploadFile.getIdentifier(), 300, TimeUnit.SECONDS)) {
if (uploadFile.getChunkNumber() == Integer.parseInt(redisUtil.getObject(uploadFile.getIdentifier() + "_current_upload_chunk_number"))) {
break;
} else {
redisLock.unlock(uploadFile.getIdentifier());
}
}
}

log.info("***********开始上传第{}块**********", uploadFile.getChunkNumber());
StorePath storePath = null;
redisUtil.getIncr(uploadFile.getIdentifier() + "_current_upload_chunk_number");

try {
if (uploadFile.getChunkNumber() <= 1) {
log.info("上传第一块");

if (uploadFile.getChunkNumber() <= 1) {
log.info("上传第一块");
CURRENT_UPLOAD_CHUNK_NUMBER.put(uploadFile.getIdentifier(), uploadFile.getChunkNumber() + 1);
try {
storePath = defaultAppendFileStorageClient.uploadAppenderFile("group1", multipartFile.getInputStream(),
multipartFile.getSize(), FileUtil.getFileExtendName(multipartFile.getOriginalFilename()));
// Record the size uploaded by the first chunk
UPLOADED_SIZE.put(uploadFile.getIdentifier(), uploadFile.getCurrentChunkSize());
log.info("第一块上传完成");
if (storePath == null) {
CURRENT_UPLOAD_CHUNK_NUMBER.put(uploadFile.getIdentifier(), uploadFile.getChunkNumber());
log.info("获取远程文件路径出错");
throw new UploadGeneralException("获取远程文件路径出错");
}
} catch (Exception e) {
CURRENT_UPLOAD_CHUNK_NUMBER.put(uploadFile.getIdentifier(), uploadFile.getChunkNumber());
log.error("初次上传远程文件出错", e);
throw new UploadGeneralException("初次上传远程文件出错", e);
}
storePath = defaultAppendFileStorageClient.uploadAppenderFile("group1", multipartFile.getInputStream(),
multipartFile.getSize(), FileUtil.getFileExtendName(multipartFile.getOriginalFilename()));
// Record the size uploaded by the first chunk
redisUtil.set(uploadFile.getIdentifier() + "_uploaded_size", uploadFile.getCurrentChunkSize(), 1000 * 60 * 60);

STORE_PATH.put(uploadFile.getIdentifier(), storePath.getPath());
log.info("上传文件 result = {}", storePath.getPath());
} else {
log.info("上传第{}块:" + uploadFile.getChunkNumber());
CURRENT_UPLOAD_CHUNK_NUMBER.put(uploadFile.getIdentifier(), uploadFile.getChunkNumber() + 1);
String path = STORE_PATH.get(uploadFile.getIdentifier());
if (path == null) {
log.error("无法获取已上传服务器文件地址");
CURRENT_UPLOAD_CHUNK_NUMBER.put(uploadFile.getIdentifier(), uploadFile.getChunkNumber());
throw new UploadGeneralException("无法获取已上传服务器文件地址");
}
try {
Long alreadySize = UPLOADED_SIZE.get(uploadFile.getIdentifier());
// With plain append, repeated mid-upload failures could append the same chunk more than once; modify mode is used instead, so the file is still assembled correctly even if duplicate chunks arrive
defaultAppendFileStorageClient.modifyFile("group1", path, multipartFile.getInputStream(),
multipartFile.getSize(), alreadySize);
// Record the accumulated uploaded size
UPLOADED_SIZE.put(uploadFile.getIdentifier(), alreadySize + multipartFile.getSize());
log.info("第{}块更新完成", uploadFile.getChunkNumber());
} catch (Exception e) {
CURRENT_UPLOAD_CHUNK_NUMBER.put(uploadFile.getIdentifier(), uploadFile.getChunkNumber());
log.error("更新远程文件出错", e);
throw new UploadGeneralException("更新远程文件出错", e);
}
log.info("第一块上传完成");
if (storePath == null) {
redisUtil.set(uploadFile.getIdentifier() + "_current_upload_chunk_number", uploadFile.getChunkNumber(), 1000 * 60 * 60);

log.info("获取远程文件路径出错");
throw new UploadGeneralException("获取远程文件路径出错");
}
} catch (Exception e) {
log.error("上传文件错误", e);
throw new UploadGeneralException("上传文件错误", e);

redisUtil.set(uploadFile.getIdentifier() + "_storage_path", storePath.getPath(), 1000 * 60 * 60);

log.info("上传文件 result = {}", storePath.getPath());
} else {
log.info("正在上传第{}块:" , uploadFile.getChunkNumber());

String path = redisUtil.getObject(uploadFile.getIdentifier() + "_storage_path");

if (path == null) {
log.error("无法获取已上传服务器文件地址");
throw new UploadGeneralException("无法获取已上传服务器文件地址");
}

String uploadedSizeStr = redisUtil.getObject(uploadFile.getIdentifier() + "_uploaded_size");
Long alreadySize = Long.parseLong(uploadedSizeStr);

// With plain append, repeated mid-upload failures could append the same chunk more than once; modify mode is used instead, so the file is still assembled correctly even if duplicate chunks arrive
defaultAppendFileStorageClient.modifyFile("group1", path, multipartFile.getInputStream(),
multipartFile.getSize(), alreadySize);
// Record the accumulated uploaded size
redisUtil.set(uploadFile.getIdentifier() + "_uploaded_size", alreadySize + multipartFile.getSize(), 1000 * 60 * 60);

}

log.info("***********第{}块上传成功**********", uploadFile.getChunkNumber());

LOCK_MAP.get(uploadFile.getIdentifier()).notifyAll();
} catch (Exception e) {
log.error("***********第{}块上传失败,自动重试**********", uploadFile.getChunkNumber());
redisUtil.set(uploadFile.getIdentifier() + "_current_upload_chunk_number", uploadFile.getChunkNumber(), 1000 * 60 * 60);
throw new UploadGeneralException("更新远程文件出错", e);
} finally {
redisLock.unlock(uploadFile.getIdentifier());
}

}
}
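
The net effect of the hunks above is that the per-file HashMaps and wait()/notifyAll() coordination are replaced by Redis keys (identifier + "_current_upload_chunk_number", "_uploaded_size", "_storage_path") plus the distributed RedisLock, so chunk ordering survives even when several service instances handle the same upload. A condensed sketch of that gating pattern, assuming the RedisLock and RedisUtil helpers shown in this commit (illustrative class name, simplified error handling):

    import java.util.concurrent.TimeUnit;
    import javax.annotation.Resource;
    import org.springframework.stereotype.Component;
    import com.qiwenshare.common.util.RedisUtil;
    import com.qiwenshare.common.util.concurrent.locks.RedisLock;

    @Component
    public class ChunkGate {

        @Resource
        RedisLock redisLock;
        @Resource
        RedisUtil redisUtil;

        /** Runs appendChunk only once chunkNumber is the one the server expects next. */
        public void writeChunkInOrder(String identifier, int chunkNumber, Runnable appendChunk) {
            String seqKey = identifier + "_current_upload_chunk_number";
            redisLock.lock(identifier);
            try {
                if (redisUtil.getObject(seqKey) == null) {
                    // first chunk for this file: expect chunk 1, expire the key after an hour
                    redisUtil.set(seqKey, 1, 1000 * 60 * 60);
                }
                // out-of-order chunk: release the lock and re-acquire until the counter catches up
                while (chunkNumber != Integer.parseInt(redisUtil.getObject(seqKey))) {
                    redisLock.unlock(identifier);
                    if (!redisLock.tryLock(identifier, 300, TimeUnit.SECONDS)) {
                        throw new IllegalStateException("timed out waiting for chunk " + chunkNumber);
                    }
                }
                redisUtil.getIncr(seqKey);   // advance the expected chunk number atomically
                appendChunk.run();           // upload/modify the file on FastDFS
            } finally {
                redisLock.unlock(identifier);
            }
        }
    }
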
@@ -1,7 +1,6 @@
package com.qiwenshare.common.util;

import cn.hutool.core.util.RandomUtil;
import com.qiwenshare.common.config.PropertiesUtil;
import com.qiwenshare.common.constant.FileConstant;
import org.apache.commons.lang3.StringUtils;
import org.springframework.util.ResourceUtils;
@@ -10,8 +9,6 @@ import java.io.File;
import java.io.FileNotFoundException;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;

public class PathUtil {
/**
@@ -1,4 +1,4 @@
package com.qiwenshare.common.config;
package com.qiwenshare.common.util;

import org.springframework.core.env.Environment;
@@ -1,15 +1,18 @@
package com.qiwenshare.common.util;

import io.netty.util.internal.StringUtil;
import lombok.extern.slf4j.Slf4j;
import org.springframework.dao.DataAccessException;
import org.springframework.data.redis.core.RedisOperations;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.SessionCallback;
import org.springframework.data.redis.support.atomic.RedisAtomicLong;
import org.springframework.stereotype.Component;

import javax.annotation.Resource;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;

@Component
@Slf4j
@@ -76,4 +79,20 @@ public class RedisUtil {
// Deleting the key is enough to release the lock
deleteKey(key);
}

/**
* @Description: get an auto-incrementing value
* @param key key
* @return
*/
public Long getIncr(String key) {
Long count = redisTemplate.opsForValue().increment(key, 1);
return count;
}

// public static void main(String[] args) {
// Lock
// }

}
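
getIncr delegates to opsForValue().increment, i.e. a server-side atomic increment (INCRBY), so concurrent callers always receive distinct, increasing values. A small usage sketch (the SequenceDemo class and key suffix are illustrative):

    import javax.annotation.Resource;
    import org.springframework.stereotype.Component;
    import com.qiwenshare.common.util.RedisUtil;

    @Component
    public class SequenceDemo {

        @Resource
        RedisUtil redisUtil;

        // The increment happens on the Redis server, not in application memory,
        // so the counter stays correct across multiple service instances.
        public long nextUploadSequence(String identifier) {
            return redisUtil.getIncr(identifier + "_seq");
        }
    }
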
@@ -1,4 +1,4 @@
package com.qiwenshare.common.util;
package com.qiwenshare.common.util.concurrent.locks;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -7,19 +7,24 @@ import org.springframework.data.redis.connection.ReturnType;
import org.springframework.data.redis.core.RedisCallback;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.types.Expiration;
import org.springframework.stereotype.Component;

import javax.annotation.Resource;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;

/**
* Distributed lock backed by Redis
*
*/
public class RedisLockUtils {
@Component
public class RedisLock{

private static final Logger log = LoggerFactory.getLogger(RedisLockUtils.class);
private static final Logger log = LoggerFactory.getLogger(RedisLock.class);

/**
* Default polling interval when acquiring the lock, in milliseconds
@@ -28,6 +33,12 @@ public class RedisLockUtils {

private static final String UNLOCK_LUA;

private static final long LOCK_EXPIRE_TIME = 60 * 15; // an acquired lock expires after at most 15 minutes

@Resource
RedisTemplate<String, Object> redisTemplate;

static {
StringBuilder lua = new StringBuilder();
lua.append("if redis.call(\"get\",KEYS[1]) == ARGV[1] ");
@@ -39,38 +50,17 @@ public class RedisLockUtils {
UNLOCK_LUA = lua.toString();
}

private RedisTemplate redisTemplate;

private final ThreadLocal<Map<String, LockVO>> lockMap = new ThreadLocal<>();

public RedisLockUtils(RedisTemplate redisTemplate) {
this.redisTemplate = redisTemplate;
}

/**
* Acquire the lock, waiting indefinitely until it is obtained
*
* @param key redis key
* @param expire lock expiration time, in seconds
*/
public void lock(final String key, long expire) {
try {
acquireLock(key, expire, -1);
} catch (Exception e) {
throw new RuntimeException("acquire lock exception", e);
}
}
public void lock(final String key) {

/**
* Try to acquire the lock; return false if it is not obtained within the given time, true otherwise
*
* @param key redis key
* @param expire lock expiration time, in seconds
* @param waitTime timeout for acquiring the lock, -1 means never time out, in seconds
*/
public boolean tryLock(final String key, long expire, long waitTime) {
try {
return acquireLock(key, expire, waitTime);
acquireLock(key, LOCK_EXPIRE_TIME, -1);
} catch (Exception e) {
throw new RuntimeException("acquire lock exception", e);
}
@@ -89,6 +79,28 @@ public class RedisLockUtils {
}
}

public boolean tryLock(final String key) {
try {
return acquireLock(key, LOCK_EXPIRE_TIME, -1);
} catch (Exception e) {
throw new RuntimeException("acquire lock exception", e);
}
}

/**
* Try to acquire the lock; return false if it is not obtained within the given time, true otherwise
*
* @param key redis key
* @param waitTime timeout for acquiring the lock, -1 means never time out, in seconds
*/
public boolean tryLock(String key, long time, TimeUnit unit) {
try {
return acquireLock(key, LOCK_EXPIRE_TIME, unit.toSeconds(time));
} catch (Exception e) {
throw new RuntimeException("acquire lock exception", e);
}
}

/**
* @param key redis key
* @param expire lock expiration time, in seconds
@@ -130,21 +142,7 @@ public class RedisLockUtils {
return false;
}

private boolean acquired(String key) {
Map<String, LockVO> map = lockMap.get();
if (map == null || map.size() == 0 || !map.containsKey(key)) {
return false;
}

LockVO vo = map.get(key);
if (vo.beforeExpireTime < System.currentTimeMillis()) {
log.debug("lock {} maybe release, because timeout ", key);
return false;
}
int after = ++vo.count;
log.debug("acquire lock {} {} ", key, after);
return true;
}

/**
* Release the lock
@@ -181,10 +179,18 @@ public class RedisLockUtils {
* @return if true success else fail
*/
private boolean tryLock(String key, long expire, String lockId) {
RedisCallback<Boolean> callback = (connection) ->
connection.set((key).getBytes(StandardCharsets.UTF_8),
lockId.getBytes(StandardCharsets.UTF_8), Expiration.seconds(expire), RedisStringCommands.SetOption.SET_IF_ABSENT);
return (Boolean) redisTemplate.execute(callback);
try{
RedisCallback<Boolean> callback = (connection) ->
connection.set(
(key).getBytes(StandardCharsets.UTF_8),
lockId.getBytes(StandardCharsets.UTF_8),
Expiration.seconds(expire),
RedisStringCommands.SetOption.SET_IF_ABSENT);
return (Boolean) redisTemplate.execute(callback);
} catch (Exception e) {
log.error("redis lock error.", e);
}
return false;
}

private static class LockVO {
@@ -214,5 +220,21 @@ public class RedisLockUtils {
}
}

private boolean acquired(String key) {
Map<String, LockVO> map = lockMap.get();
if (map == null || map.size() == 0 || !map.containsKey(key)) {
return false;
}

LockVO vo = map.get(key);
if (vo.beforeExpireTime < System.currentTimeMillis()) {
log.debug("lock {} maybe release, because timeout ", key);
return false;
}
int after = ++vo.count;
log.debug("acquire lock {} {} ", key, after);
return true;
}

}
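
Under the hood the lock is a SET-if-absent with an expiration (the private tryLock above) plus the compare-and-delete Lua script in UNLOCK_LUA, so only the holder of the matching lockId can release the key, and the key expires on its own if the holder dies. A minimal caller sketch, assuming the component is injected (the InventoryService name and key are illustrative):

    import java.util.concurrent.TimeUnit;
    import javax.annotation.Resource;
    import org.springframework.stereotype.Component;
    import com.qiwenshare.common.util.concurrent.locks.RedisLock;

    @Component
    public class InventoryService {

        @Resource
        RedisLock redisLock;

        public void reserve(String itemId) {
            String key = "inventory:" + itemId;
            // wait up to 5 seconds for the lock
            if (!redisLock.tryLock(key, 5, TimeUnit.SECONDS)) {
                throw new IllegalStateException("could not lock " + key);
            }
            try {
                // ... critical section guarded across all service instances ...
            } finally {
                redisLock.unlock(key);
            }
        }
    }
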
@@ -3,8 +3,13 @@ package com.qiwenshare.file;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.FilterType;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.GenericToStringSerializer;
import org.springframework.data.redis.serializer.StringRedisSerializer;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.transaction.annotation.EnableTransactionManagement;

@@ -24,6 +29,4 @@ public class FileApplication {
SpringApplication.run(FileApplication.class, args);
}

}
@@ -13,11 +13,23 @@ import javax.annotation.Resource;
import java.util.List;
import java.util.Queue;

/**
* File business-logic component
*/
@Component
public class FileDealComp {
@Resource
UserFileMapper userFileMapper;

/**
* Build a non-conflicting file name
*
* Scenario: when a restored file named 测试.txt would collide with an existing file under savefilePath, 测试(1).txt is generated instead
*
* @param userFile
* @param savefilePath
* @return
*/
public String getRepeatFileName(UserFile userFile, String savefilePath) {
String fileName = userFile.getFileName();
String extendName = userFile.getExtendName();
@@ -57,6 +69,13 @@ public class FileDealComp {

}

/**
* Restore the parent directory path
*
* Restoring a file from the recycle bin puts it back at its original path; if the parent directory no longer exists, it has to be restored as well
* @param filePath
* @param sessionUserId
*/
public void restoreParentFilePath(String filePath, Long sessionUserId) {
String parentFilePath = PathUtil.getParentPath(filePath);
while(parentFilePath.contains("/")) {
@@ -84,6 +103,14 @@ public class FileDealComp {
}
}

/**
* Remove duplicate sub-directory entries
*
* When a directory is restored and one of its sub-directories already exists in the file system, de-duplicate after the restore
* @param filePath
* @param sessionUserId
*/
public void deleteRepeatSubDirFile(String filePath, Long sessionUserId) {
LambdaQueryWrapper<UserFile> lambdaQueryWrapper = new LambdaQueryWrapper<>();

@@ -108,6 +135,14 @@ public class FileDealComp {
}
}

/**
* Build a directory-tree node, used when moving files
* @param treeNode
* @param id
* @param filePath
* @param nodeNameQueue
* @return
*/
public TreeNode insertTreeNode(TreeNode treeNode, long id, String filePath, Queue<String> nodeNameQueue){

List<TreeNode> childrenTreeNodes = treeNode.getChildren();
@@ -152,6 +187,12 @@ public class FileDealComp {

}

/**
* Check whether the path already exists among the child tree nodes
* @param childrenTreeNodes
* @param path
* @return
*/
public boolean isExistPath(List<TreeNode> childrenTreeNodes, String path){
boolean isExistPath = false;
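
The naming rule described in the getRepeatFileName javadoc, where 测试.txt collides and 测试(1).txt (then 测试(2).txt, and so on) is produced, amounts to probing suffixed candidates until one is free. A standalone sketch of that rule (not the mapper-backed implementation used here):

    import java.util.Set;

    public final class RepeatNameDemo {

        /** Returns fileName unchanged if free, otherwise base(1).ext, base(2).ext, ... */
        static String nextAvailableName(String fileName, Set<String> existingNames) {
            if (!existingNames.contains(fileName)) {
                return fileName;
            }
            int dot = fileName.lastIndexOf('.');
            String base = dot >= 0 ? fileName.substring(0, dot) : fileName;
            String ext = dot >= 0 ? fileName.substring(dot) : "";
            int i = 1;
            while (existingNames.contains(base + "(" + i + ")" + ext)) {
                i++;
            }
            return base + "(" + i + ")" + ext;
        }
    }
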
@@ -104,7 +104,8 @@ public class FileController {
@GetMapping(value = "/search")
@MyLog(operation = "文件搜索", module = CURRENT_MODULE)
@ResponseBody
public RestResult<SearchHits<FileSearch>> searchFile(SearchFileDTO searchFileDTO) {
public RestResult<SearchHits<FileSearch>> searchFile(SearchFileDTO searchFileDTO, @RequestHeader("token") String token) {
UserBean sessionUserBean = userService.getUserBeanByToken(token);
NativeSearchQueryBuilder queryBuilder = new NativeSearchQueryBuilder();
HighlightBuilder.Field allHighLight = new HighlightBuilder.Field("*").preTags("<span class='keyword'>")
.postTags("</span>");
@@ -131,7 +132,10 @@ public class FileController {
queryBuilder.withPageable(PageRequest.of(currentPage, pageCount, Sort.by(direction, order)));
}

queryBuilder.withQuery(QueryBuilders.matchQuery("fileName", searchFileDTO.getFileName()));
queryBuilder.withQuery(QueryBuilders.boolQuery()
.must(QueryBuilders.matchQuery("fileName", searchFileDTO.getFileName()))
.must(QueryBuilders.termQuery("userId", sessionUserBean.getUserId()))
);
SearchHits<FileSearch> search = elasticsearchRestTemplate.search(queryBuilder.build(), FileSearch.class);

return RestResult.success().data(search);
@@ -62,22 +62,19 @@ public class UserController {
@MyLog(operation = "用户登录", module = CURRENT_MODULE)
@ResponseBody
public RestResult<UserLoginVo> userLogin(
@Parameter(description = "登录手机号") String username,
@Parameter(description = "登录手机号") String telephone,
@Parameter(description = "登录密码") String password) {
RestResult<UserLoginVo> restResult = new RestResult<UserLoginVo>();
UserBean saveUserBean = userService.findUserInfoByTelephone(username);
UserBean saveUserBean = userService.findUserInfoByTelephone(telephone);

if (saveUserBean == null) {
return RestResult.fail().message("用户名或手机号不存在!");
return RestResult.fail().message("手机号或密码错误!");
}
String jwt = "";
try {
jwt = JjwtUtil.createJWT("qiwenshare", "qiwen", JSON.toJSONString(saveUserBean));
} catch (Exception e) {
log.info("登录失败:{}", e);
restResult.setSuccess(false);
restResult.setMessage("登录失败!");
return restResult;
return RestResult.fail().message("创建token失败!");
}

String passwordHash = new SimpleHash("MD5", password, saveUserBean.getSalt(), 1024).toHex();
@@ -86,14 +83,11 @@ public class UserController {
UserLoginVo userLoginVo = new UserLoginVo();
BeanUtil.copyProperties(saveUserBean, userLoginVo);
userLoginVo.setToken("Bearer " + jwt);
restResult.setData(userLoginVo);
restResult.setSuccess(true);
return RestResult.success().data(userLoginVo);
} else {
restResult.setSuccess(false);
restResult.setMessage("手机号或密码错误!");
return RestResult.fail().message("手机号或密码错误!");
}

return restResult;
}

@Operation(summary = "检查用户登录信息", description = "验证token的有效性", tags = {"user"})
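
The login path above hashes the submitted password with the user's stored salt via Shiro's SimpleHash (MD5, 1024 iterations) and compares the hex digest; note that both the unknown-phone and wrong-password branches now return the same 手机号或密码错误 message, so the endpoint no longer reveals which phone numbers have accounts. A minimal sketch of that credential check (helper name illustrative):

    import org.apache.shiro.crypto.hash.SimpleHash;

    public final class PasswordCheck {

        /** True when rawPassword, salted and hashed the same way as at registration, matches storedHash. */
        static boolean matches(String rawPassword, String salt, String storedHash) {
            String candidate = new SimpleHash("MD5", rawPassword, salt, 1024).toHex();
            return candidate.equals(storedHash);
        }
    }
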
@@ -1,5 +1,5 @@

#jdbc connection
#jdbc connection - H2 database
spring.datasource.driverClassName=org.h2.Driver
spring.datasource.url = jdbc:h2:file:D:/temp_db/file;MODE=MYSQL;DATABASE_TO_LOWER=TRUE
spring.datasource.username=sa
@@ -1,5 +1,5 @@

#jdbc connection
#jdbc connection - MySQL database
spring.datasource.driverClassName=com.mysql.cj.jdbc.Driver
spring.datasource.url = jdbc:mysql://localhost:3306/file?serverTimezone=GMT%2B8&useUnicode=true&characterEncoding=utf-8&allowMultiQueries=true
spring.datasource.username=root
@@ -1,8 +1,4 @@
#!/bin/bash
#. /etc/profile
#. ~/.bashrc
#. ~/.bash_profile
#support jsch commons

cd `dirname $0`
BIN_DIR=`pwd`
@@ -21,22 +17,18 @@ LIB_DIR=$DEPLOY_DIR/lib
SERVER_NAME=`cat $CONF_DIR/config/application.properties | grep -w "spring.application.name" | grep -v "#" | awk -F= 'NR==1{print $2}'`
SERVER_PORT=`cat $CONF_DIR/config/application.properties | grep -w "server.port" | grep -v "#" | awk -F= 'NR==1{print $2}'`

#REM **********************************************************************************************

LOG_PATH=$DEPLOY_DIR/log/qiwen-file
GC_LOG_PATH=$DEPLOY_DIR/log/qiwen-file/gc

if [ "${LOG_PATH}" == "" ] ; then
LOG_PATH=$PARENT_DIR/logs/${SERVER_NAME}
fi
if [ "${GC_LOG_PATH}" == "" ] ; then
GC_LOG_PATH=$PARENT_DIR/logs/${SERVER_NAME}/gclog
fi

if [ ! -d ${LOG_PATH} ];then
mkdir -p ${LOG_PATH}
fi
if [ ! -d ${GC_LOG_PATH} ];then
mkdir -p ${GC_LOG_PATH}
fi

STDOUT_FILE=${LOG_PATH}/nohup.out

@@ -54,7 +46,7 @@ fi
USER_VMARGS="-D64 -server -Xmx1g -Xms1g -Xmn521m -Xss256k "

GC_OPTS=""
#GC_OPTS="-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=$LOG_PATH/heapdump.$$.hprof -XX:ErrorFile=$LOG_PATH/hs_err_pid$$.log -Xloggc:$GC_LOG_PATH/gc.$$.log -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC "

JMX_PORT=""
JAVA_JMX_OPTS=""
@@ -68,10 +60,6 @@ fi
#fi

JAVA_OPTS=""
#if [ "${JAVA_OPTS}" != "" ]; then
# JAVA_OPTS=" -Djava.awt.headless=true -Djava.net.preferIPv4Stack=true "
#fi
#REM **********************************************************************************************

#prevent repeated start
PIDS=`ps -ef | grep java | grep "$CONF_DIR" |awk '{print $2}'`
@@ -90,40 +78,20 @@ if [ -n "$SERVER_PORT" ]; then
fi
#=====================

######################cloud trace##########################
TRACE_CLOUD_TRACE_URL=http://localhost:9411
TRACE_CLOUD_TRACE_PROBABILITY=1.0
TRACE_CLOUD_TRACE_ENABLED=true
TRACE_LIB_JARS="/opt/cloud-trace-deps-1.1.0/*"
echo "Using TRACE_LIB_JARS: $TRACE_LIB_JARS"
if [ -n "$TRACE_CLOUD_TRACE_URL" ]; then
TRACE_OPTS="-Dcloud.trace.url=$TRACE_CLOUD_TRACE_URL"
fi
if [ -n "$TRACE_CLOUD_TRACE_PROBABILITY" ]; then
TRACE_OPTS="$TRACE_OPTS -Dcloud.trace.probability=$TRACE_CLOUD_TRACE_PROBABILITY"
fi
if [ -n "$TRACE_CLOUD_TRACE_ENABLED" ]; then
TRACE_OPTS="$TRACE_OPTS -Dcloud.trace.enabled=$TRACE_CLOUD_TRACE_ENABLED"
fi
#echo "Using TRACE_OPTS: $TRACE_OPTS"
##########################################################

LIB_JARS=$DEPLOY_DIR/lib/*
echo "Using LIB_JARS: $LIB_JARS"
echo "Using CONF_DIR: $CONF_DIR"
echo "Using TRACE_LIB_JARS: $TRACE_LIB_JARS"

CLASSPATH=".:$CONF_DIR:$LIB_JARS:$TRACE_LIB_JARS"
CLASSPATH=".:$CONF_DIR:$LIB_JARS"

EXEC_CMDLINE="${JAVA_HOME}/bin/java -classpath ${CLASSPATH} ${TRACE_OPTS} ${USER_VMARGS} ${GC_OPTS} ${JAVA_JMX_OPTS} ${JAVA_DEBUG} ${JAVA_OPTS} com.qiwenshare.file.FileApplication"
EXEC_CMDLINE="${JAVA_HOME}/bin/java -classpath ${CLASSPATH} ${USER_VMARGS} ${GC_OPTS} ${JAVA_JMX_OPTS} ${JAVA_DEBUG} ${JAVA_OPTS} com.qiwenshare.file.FileApplication"

echo "Start app command line: ${EXEC_CMDLINE}" >> $STDOUT_FILE
echo "Starting $SERVER_NAME ..."

nohup ${EXEC_CMDLINE} >> $STDOUT_FILE 2>&1 &

###wait app start listener port, only wait 120s

COUNT=0
while [ $COUNT -lt 120 ]; do
echo -e ".\c"