Browse Source

提交代码,解决Bug问题。

Signed-off-by: zouap <zouap@pcl.ac.cn>
pull/3226/head
zouap 2 years ago
parent
commit
a423bdaf80
2 changed files with 111 additions and 127 deletions
  1. +6
    -5
      models/file_chunk.go
  2. +105
    -122
      routers/repo/attachment_model.go

+ 6
- 5
models/file_chunk.go View File

@@ -30,8 +30,9 @@ type FileChunk struct {

type ModelFileChunk struct {
ID int64 `xorm:"pk autoincr"`
UUID string `xorm:"uuid UNIQUE"`
UUID string `xorm:"INDEX"`
Md5 string `xorm:"INDEX"`
ModelUUID string `xorm:"INDEX"`
ObjectName string `xorm:"DEFAULT ''"`
IsUploaded int `xorm:"DEFAULT 0"` // not uploaded: 0, uploaded: 1
UploadID string `xorm:"UNIQUE"` //minio upload id
@@ -65,14 +66,14 @@ func GetFileChunkByMD5AndUser(md5 string, userID int64, typeCloudBrain int) (*Fi
return getFileChunkByMD5AndUser(x, md5, userID, typeCloudBrain)
}

func GetModelFileChunkByMD5AndUser(md5 string, userID int64, typeCloudBrain int) (*ModelFileChunk, error) {
return getModelFileChunkByMD5AndUser(x, md5, userID, typeCloudBrain)
func GetModelFileChunkByMD5AndUser(md5 string, userID int64, typeCloudBrain int, uuid string) (*ModelFileChunk, error) {
return getModelFileChunkByMD5AndUser(x, md5, userID, typeCloudBrain, uuid)
}

func getModelFileChunkByMD5AndUser(e Engine, md5 string, userID int64, typeCloudBrain int) (*ModelFileChunk, error) {
func getModelFileChunkByMD5AndUser(e Engine, md5 string, userID int64, typeCloudBrain int, uuid string) (*ModelFileChunk, error) {
fileChunk := new(ModelFileChunk)

if has, err := e.Where("md5 = ? and user_id = ? and type = ?", md5, userID, typeCloudBrain).Get(fileChunk); err != nil {
if has, err := e.Where("md5 = ? and user_id = ? and type = ? and model_uuid= ?", md5, userID, typeCloudBrain, uuid).Get(fileChunk); err != nil {
return nil, err
} else if !has {
return nil, ErrFileChunkNotExist{md5, ""}


+ 105
- 122
routers/repo/attachment_model.go View File

@@ -31,7 +31,7 @@ func GetModelChunks(ctx *context.Context) {
return
}

fileChunk, err := models.GetModelFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
fileChunk, err := models.GetModelFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain, modeluuid)
if err != nil {
if models.IsErrFileChunkNotExist(err) {
ctx.JSON(200, map[string]string{
@@ -79,6 +79,21 @@ func GetModelChunks(ctx *context.Context) {
log.Error("UpdateFileChunk failed:", err.Error())
}
}
modelname := ""
model, err := models.QueryModelById(modeluuid)
if err == nil && model != nil {
modelname = model.Name
}
ctx.JSON(200, map[string]string{
"uuid": fileChunk.UUID,
"uploaded": strconv.Itoa(fileChunk.IsUploaded),
"uploadID": fileChunk.UploadID,
"chunks": string(chunks),
"attachID": "0",
"modeluuid": modeluuid,
"fileName": fileName,
"modelName": modelname,
})
} else {
if fileChunk.IsUploaded == models.FileUploaded {
log.Info("the file has been recorded but not uploaded")
@@ -107,120 +122,103 @@ func GetModelChunks(ctx *context.Context) {
"uploadID": "",
"chunks": "",
})
return
}
}

var attachID int64
attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
if err != nil {
if models.IsErrAttachmentNotExist(err) {
attachID = 0
} else {
ctx.ServerError("GetAttachmentByUUID", err)
return
}
} else {
attachID = attach.ID
}

if attach == nil {
ctx.JSON(200, map[string]string{
"uuid": fileChunk.UUID,
"uploaded": strconv.Itoa(fileChunk.IsUploaded),
"uploadID": fileChunk.UploadID,
"chunks": string(chunks),
"attachID": "0",
"datasetID": "0",
"fileName": "",
"datasetName": "",
})
return
}

//使用description存储模型信息
dbmodeluuid := attach.Description
modelname := ""
if dbmodeluuid != modeluuid {
log.Info("The file has uploaded.fileChunk.ObjectName=" + fileChunk.ObjectName + " typeCloudBrain=" + fmt.Sprint(typeCloudBrain))
isExist := copyModelAttachmentFile(typeCloudBrain, fileChunk, fileName, modeluuid)
if isExist {
model, err := models.QueryModelById(modeluuid)
if err == nil && model != nil {
modelname = model.Name
}
ctx.JSON(200, map[string]string{
"uuid": fileChunk.UUID,
"uploaded": strconv.Itoa(fileChunk.IsUploaded),
"uploadID": fileChunk.UploadID,
"chunks": string(chunks),
"attachID": strconv.Itoa(int(attachID)),
"modeluuid": modeluuid,
"fileName": attach.Name,
"modelName": modelname,
})
} else {
UpdateModelSize(modeluuid)
ctx.JSON(200, map[string]string{
"uuid": fileChunk.UUID,
"uploaded": strconv.Itoa(fileChunk.IsUploaded),
"uploadID": fileChunk.UploadID,
"chunks": string(chunks),
"attachID": strconv.Itoa(int(attachID)),
"fileName": attach.Name,
"uuid": fileChunk.UUID,
"uploaded": strconv.Itoa(fileChunk.IsUploaded),
"uploadID": fileChunk.UploadID,
"chunks": string(chunks),
"attachID": "0",
"datasetID": "0",
"fileName": "",
"datasetName": "",
})
}
return
} else {
model, err := models.QueryModelById(dbmodeluuid)
if err == nil {
modelname = model.Name
}
ctx.JSON(200, map[string]string{
"uuid": fileChunk.UUID,
"uploaded": strconv.Itoa(fileChunk.IsUploaded),
"uploadID": fileChunk.UploadID,
"chunks": string(chunks),
"attachID": strconv.Itoa(int(attachID)),
"modeluuid": dbmodeluuid,
"fileName": attach.Name,
"modelName": modelname,
})
return
}

}

// copyModelAttachmentFile ensures the uploaded file recorded in fileChunk is
// present under the destination object key for the model identified by
// modeluuid. typeCloudBrain selects the backing store: TypeCloudBrainOne uses
// Minio, anything else uses OBS.
//
// It returns true when the destination object already exists (no copy is
// performed) and false when a server-side copy was issued.
// NOTE(review): the copy calls ignore the storage layer's error returns —
// presumably best-effort by design; confirm with the storage package.
func copyModelAttachmentFile(typeCloudBrain int, fileChunk *models.ModelFileChunk, fileName, modeluuid string) bool {
	srcObjectName := fileChunk.ObjectName
	var isExist bool
	// Destination key is derived from the target model uuid and file name.
	destObjectName := getObjectName(fileName, modeluuid)
	if typeCloudBrain == models.TypeCloudBrainOne {
		bucketName := setting.Attachment.Minio.Bucket
		log.Info("minio copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
		// A positive size means the destination object is already there.
		if storage.MinioGetFilesSize(bucketName, []string{destObjectName}) > 0 {
			isExist = true
		} else {
			log.Info("minio copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
			storage.MinioCopyAFile(bucketName, srcObjectName, bucketName, destObjectName)
		}
	} else {
		bucketName := setting.Bucket
		log.Info("obs copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName + " destObjectName=" + destObjectName)
		size := storage.ObsGetFilesSize(bucketName, []string{destObjectName})
		log.Info("size=" + fmt.Sprint(size))
		if size > 0 {
			isExist = true
		} else {
			log.Info("obs copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
			storage.ObsCopyFile(bucketName, srcObjectName, bucketName, destObjectName)
		}
	}
	return isExist
}

// func copyModelAttachmentFile(typeCloudBrain int, fileChunk *models.ModelFileChunk, fileName, modeluuid string) bool {
// srcObjectName := fileChunk.ObjectName
// var isExist bool
// //copy
// destObjectName := getObjectName(fileName, modeluuid)
// if typeCloudBrain == models.TypeCloudBrainOne {
// bucketName := setting.Attachment.Minio.Bucket
// log.Info("minio copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
// if storage.MinioGetFilesSize(bucketName, []string{destObjectName}) > 0 {
// isExist = true
// } else {

// log.Info("minio copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
// storage.MinioCopyAFile(bucketName, srcObjectName, bucketName, destObjectName)
// }
// } else {
// bucketName := setting.Bucket
// log.Info("obs copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName + " destObjectName=" + destObjectName)
// size := storage.ObsGetFilesSize(bucketName, []string{destObjectName})
// log.Info("size=" + fmt.Sprint(size))
// if size > 0 {
// isExist = true
// } else {
// log.Info("obs copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
// storage.ObsCopyFile(bucketName, srcObjectName, bucketName, destObjectName)
// }
// }
// return isExist
// }

// getObjectName builds the storage object key for a model file, sharding by
// the first two characters of the model uuid:
// Model_prefix/<uuid[0]>/<uuid[1]>/<uuid>/<filename>, with no leading slash.
func getObjectName(filename string, modeluuid string) string {
	shardedPath := path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, filename)
	fullKey := path.Join(Model_prefix, shardedPath)
	return strings.TrimPrefix(fullKey, "/")
}
@@ -286,6 +284,7 @@ func NewModelMultipart(ctx *context.Context) {
Md5: ctx.Query("md5"),
Size: fileSize,
ObjectName: objectName,
ModelUUID: modeluuid,
TotalChunks: totalChunkCounts,
Type: typeCloudBrain,
})
@@ -361,7 +360,6 @@ func CompleteModelMultipart(ctx *context.Context) {
uuid := ctx.Query("uuid")
uploadID := ctx.Query("uploadID")
typeCloudBrain := ctx.QueryInt("type")
fileName := ctx.Query("file_name")
modeluuid := ctx.Query("modeluuid")
log.Warn("uuid:" + uuid)
log.Warn("modeluuid:" + modeluuid)
@@ -406,21 +404,6 @@ func CompleteModelMultipart(ctx *context.Context) {
//更新模型大小信息
UpdateModelSize(modeluuid)

_, err = models.InsertAttachment(&models.Attachment{
UUID: uuid,
UploaderID: ctx.User.ID,
IsPrivate: true,
Name: fileName,
Size: ctx.QueryInt64("size"),
DatasetID: 0,
Description: modeluuid,
Type: typeCloudBrain,
})

if err != nil {
ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
return
}
ctx.JSON(200, map[string]string{
"result_code": "0",
})


Loading…
Cancel
Save