// Copyright 2017 The Gitea Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package repo import ( contexExt "context" "encoding/json" "errors" "fmt" "mime/multipart" "net/http" "path" "strconv" "strings" "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/minio_ext" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/storage" "code.gitea.io/gitea/modules/upload" "code.gitea.io/gitea/modules/worker" gouuid "github.com/satori/go.uuid" ) const ( //result of decompress DecompressSuccess = "0" DecompressFailed = "1" ) type CloudBrainDataset struct { UUID string `json:"id"` Name string `json:"name"` Path string `json:"place"` UserName string `json:"provider"` CreateTime string `json:"created_at"` } type UploadForm struct { UploadID string `form:"uploadId"` UuID string `form:"uuid"` PartSize int64 `form:"size"` Offset int64 `form:"offset"` PartNumber int `form:"chunkNumber"` PartFile multipart.File `form:"file"` } func RenderAttachmentSettings(ctx *context.Context) { renderAttachmentSettings(ctx) } func renderAttachmentSettings(ctx *context.Context) { ctx.Data["IsAttachmentEnabled"] = setting.Attachment.Enabled ctx.Data["AttachmentStoreType"] = setting.Attachment.StoreType ctx.Data["AttachmentAllowedTypes"] = setting.Attachment.AllowedTypes ctx.Data["AttachmentMaxSize"] = setting.Attachment.MaxSize ctx.Data["AttachmentMaxFiles"] = setting.Attachment.MaxFiles } // UploadAttachment response for uploading issue's attachment func UploadAttachment(ctx *context.Context) { if !setting.Attachment.Enabled { ctx.Error(404, "attachment is not enabled") return } file, header, err := ctx.Req.FormFile("file") if err != nil { ctx.Error(500, fmt.Sprintf("FormFile: %v", err)) return } defer file.Close() buf := make([]byte, 1024) n, _ := file.Read(buf) if n > 0 { buf = buf[:n] } err = upload.VerifyAllowedContentType(buf, strings.Split(setting.Attachment.AllowedTypes, ",")) if err != nil { ctx.Error(400, err.Error()) return } datasetID, _ := strconv.ParseInt(ctx.Req.FormValue("dataset_id"), 10, 64) attach, err := models.NewAttachment(&models.Attachment{ IsPrivate: true, UploaderID: ctx.User.ID, Name: header.Filename, DatasetID: datasetID, }, buf, file) if err != nil { ctx.Error(500, fmt.Sprintf("NewAttachment: %v", err)) return } log.Trace("New attachment uploaded: %s", attach.UUID) ctx.JSON(200, map[string]string{ "uuid": attach.UUID, }) } func UpdatePublicAttachment(ctx *context.Context) { file := ctx.Query("file") isPrivate, _ := strconv.ParseBool(ctx.Query("is_private")) attach, err := models.GetAttachmentByUUID(file) if err != nil { ctx.Error(404, err.Error()) return } attach.IsPrivate = isPrivate models.UpdateAttachment(attach) } // DeleteAttachment response for deleting issue's attachment func DeleteAttachment(ctx *context.Context) { file := ctx.Query("file") attach, err := models.GetAttachmentByUUID(file) if err != nil { ctx.Error(400, err.Error()) return } if !ctx.IsSigned || (ctx.User.ID != attach.UploaderID) { ctx.Error(403) return } err = models.DeleteAttachment(attach, false) if err != nil { ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err)) return } ctx.JSON(200, map[string]string{ "uuid": attach.UUID, }) } // GetAttachment serve attachements func GetAttachment(ctx *context.Context) { typeCloudBrain := ctx.QueryInt("type") err := checkTypeCloudBrain(typeCloudBrain) if err != nil { ctx.ServerError("checkTypeCloudBrain failed", err) return } attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid")) if err != nil { if models.IsErrAttachmentNotExist(err) { ctx.Error(404) } else { ctx.ServerError("GetAttachmentByUUID", err) } return } repository, unitType, err := attach.LinkedRepository() if err != nil { ctx.ServerError("LinkedRepository", err) return } if repository == nil { //If not linked if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate { //We block if not the uploader ctx.Error(http.StatusNotFound) return } } else { //If we have the repository we check access perm, err := models.GetUserRepoPermission(repository, ctx.User) if err != nil { ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", err.Error()) return } if !perm.CanRead(unitType) { ctx.Error(http.StatusNotFound) return } } dataSet, err := attach.LinkedDataSet() if err != nil { ctx.ServerError("LinkedDataSet", err) return } if dataSet != nil { isPermit, err := models.GetUserDataSetPermission(dataSet, ctx.User) if err != nil { ctx.Error(http.StatusInternalServerError, "GetUserDataSetPermission", err.Error()) return } if !isPermit { ctx.Error(http.StatusNotFound) return } } //If we have matched and access to release or issue if setting.Attachment.StoreType == storage.MinioStorageType { url := "" if typeCloudBrain == models.TypeCloudBrainOne { url, err = storage.Attachments.PresignedGetURL(attach.RelativePath(), attach.Name) if err != nil { ctx.ServerError("PresignedGetURL", err) return } } else { url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name) if err != nil { ctx.ServerError("ObsGetPreSignedUrl", err) return } } log.Info(url) if err = increaseDownloadCount(attach, dataSet); err != nil { ctx.ServerError("Update", err) return } http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently) } else { fr, err := storage.Attachments.Open(attach.RelativePath()) if err != nil { ctx.ServerError("Open", err) return } defer fr.Close() if err = increaseDownloadCount(attach, dataSet); err != nil { ctx.ServerError("Update", err) return } if err = ServeData(ctx, attach.Name, fr); err != nil { ctx.ServerError("ServeData", err) return } } } func increaseDownloadCount(attach *models.Attachment, dataSet *models.Dataset) error { if err := attach.IncreaseDownloadCount(); err != nil { return err } if dataSet != nil { if err := models.IncreaseDownloadCount(dataSet.ID); err != nil { return err } } return nil } // Get a presigned url for put object func GetPresignedPutObjectURL(ctx *context.Context) { if !setting.Attachment.Enabled { ctx.Error(404, "attachment is not enabled") return } err := upload.VerifyFileType(ctx.Params("file_type"), strings.Split(setting.Attachment.AllowedTypes, ",")) if err != nil { ctx.Error(400, err.Error()) return } if setting.Attachment.StoreType == storage.MinioStorageType { uuid := gouuid.NewV4().String() url, err := storage.Attachments.PresignedPutURL(models.AttachmentRelativePath(uuid)) if err != nil { ctx.ServerError("PresignedPutURL", err) return } ctx.JSON(200, map[string]string{ "uuid": uuid, "url": url, }) } else { ctx.Error(404, "storage type is not enabled") return } } // AddAttachment response for add attachment record func AddAttachment(ctx *context.Context) { typeCloudBrain := ctx.QueryInt("type") err := checkTypeCloudBrain(typeCloudBrain) if err != nil { ctx.ServerError("checkTypeCloudBrain failed", err) return } uuid := ctx.Query("uuid") has := false if typeCloudBrain == models.TypeCloudBrainOne { has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid)) if err != nil { ctx.ServerError("HasObject", err) return } } else { has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + uuid) if err != nil { ctx.ServerError("ObsHasObject", err) return } } if !has { ctx.Error(404, "attachment has not been uploaded") return } attachment, err := models.InsertAttachment(&models.Attachment{ UUID: uuid, UploaderID: ctx.User.ID, IsPrivate: true, Name: ctx.Query("file_name"), Size: ctx.QueryInt64("size"), DatasetID: ctx.QueryInt64("dataset_id"), Type: typeCloudBrain, }) if err != nil { ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err)) return } if attachment.DatasetID != 0 { if strings.HasSuffix(attachment.Name, ".zip") { if typeCloudBrain == models.TypeCloudBrainOne { err = worker.SendDecompressTask(contexExt.Background(), uuid) if err != nil { log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error()) } else { attachment.DecompressState = models.DecompressStateIng err = models.UpdateAttachment(attachment) if err != nil { log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error()) } } } //todo:decompress type_two } } ctx.JSON(200, map[string]string{ "result_code": "0", }) } func UpdateAttachmentDecompressState(ctx *context.Context) { uuid := ctx.Query("uuid") result := ctx.Query("result") attach, err := models.GetAttachmentByUUID(uuid) if err != nil { log.Error("GetAttachmentByUUID(%s) failed:%s", uuid, err.Error()) return } if result == DecompressSuccess { attach.DecompressState = models.DecompressStateDone } else if result == DecompressFailed { attach.DecompressState = models.DecompressStateFailed } else { log.Error("result is error:", result) return } err = models.UpdateAttachment(attach) if err != nil { log.Error("UpdateAttachment(%s) failed:%s", uuid, err.Error()) return } ctx.JSON(200, map[string]string{ "result_code": "0", }) } func GetSuccessChunks(ctx *context.Context) { fileMD5 := ctx.Query("md5") typeCloudBrain := ctx.QueryInt("type") var chunks string err := checkTypeCloudBrain(typeCloudBrain) if err != nil { ctx.ServerError("checkTypeCloudBrain failed", err) return } fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain) if err != nil { if models.IsErrFileChunkNotExist(err) { ctx.JSON(200, map[string]string{ "uuid": "", "uploaded": "0", "uploadID": "", "chunks": "", }) } else { ctx.ServerError("GetFileChunkByMD5", err) } return } isExist := false if typeCloudBrain == models.TypeCloudBrainOne { isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID)) if err != nil { ctx.ServerError("HasObject failed", err) return } } else { isExist, err = storage.ObsHasObject(models.AttachmentRelativePath(fileChunk.UUID)) if err != nil { ctx.ServerError("ObsHasObject failed", err) return } } if isExist { if fileChunk.IsUploaded == models.FileNotUploaded { log.Info("the file has been uploaded but not recorded") fileChunk.IsUploaded = models.FileUploaded if err = models.UpdateFileChunk(fileChunk); err != nil { log.Error("UpdateFileChunk failed:", err.Error()) } } } else { if fileChunk.IsUploaded == models.FileUploaded { log.Info("the file has been recorded but not uploaded") fileChunk.IsUploaded = models.FileNotUploaded if err = models.UpdateFileChunk(fileChunk); err != nil { log.Error("UpdateFileChunk failed:", err.Error()) } } if typeCloudBrain == models.TypeCloudBrainOne { chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID) if err != nil { ctx.ServerError("GetPartInfos failed", err) return } } else { chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID) if err != nil { ctx.ServerError("GetObsPartInfos failed", err) return } } } var attachID int64 attach, err := models.GetAttachmentByUUID(fileChunk.UUID) if err != nil { if models.IsErrAttachmentNotExist(err) { attachID = 0 } else { ctx.ServerError("GetAttachmentByUUID", err) return } } else { attachID = attach.ID } if attach == nil { ctx.JSON(200, map[string]string{ "uuid": fileChunk.UUID, "uploaded": strconv.Itoa(fileChunk.IsUploaded), "uploadID": fileChunk.UploadID, "chunks": string(chunks), "attachID": "0", "datasetID": "0", "fileName": "", "datasetName": "", }) return } dataset, err := models.GetDatasetByID(attach.DatasetID) if err != nil { ctx.ServerError("GetDatasetByID", err) return } ctx.JSON(200, map[string]string{ "uuid": fileChunk.UUID, "uploaded": strconv.Itoa(fileChunk.IsUploaded), "uploadID": fileChunk.UploadID, "chunks": string(chunks), "attachID": strconv.Itoa(int(attachID)), "datasetID": strconv.Itoa(int(attach.DatasetID)), "fileName": attach.Name, "datasetName": dataset.Title, }) } func NewMultipart(ctx *context.Context) { if !setting.Attachment.Enabled { ctx.Error(404, "attachment is not enabled") return } err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ",")) if err != nil { ctx.Error(400, err.Error()) return } typeCloudBrain := ctx.QueryInt("type") err = checkTypeCloudBrain(typeCloudBrain) if err != nil { ctx.ServerError("checkTypeCloudBrain failed", err) return } if setting.Attachment.StoreType == storage.MinioStorageType { totalChunkCounts := ctx.QueryInt("totalChunkCounts") if totalChunkCounts > minio_ext.MaxPartsCount { ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts)) return } fileSize := ctx.QueryInt64("size") if fileSize > minio_ext.MaxMultipartPutObjectSize { ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize)) return } uuid := gouuid.NewV4().String() var uploadID string if typeCloudBrain == models.TypeCloudBrainOne { uploadID, err = storage.NewMultiPartUpload(uuid) if err != nil { ctx.ServerError("NewMultipart", err) return } } else { uploadID, err = storage.NewObsMultiPartUpload(uuid) if err != nil { ctx.ServerError("NewObsMultiPartUpload", err) return } } _, err = models.InsertFileChunk(&models.FileChunk{ UUID: uuid, UserID: ctx.User.ID, UploadID: uploadID, Md5: ctx.Query("md5"), Size: fileSize, TotalChunks: totalChunkCounts, Type: typeCloudBrain, }) if err != nil { ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err)) return } ctx.JSON(200, map[string]string{ "uuid": uuid, "uploadID": uploadID, }) } else { ctx.Error(404, "storage type is not enabled") return } } func GetMultipartUploadUrl(ctx *context.Context) { uuid := ctx.Query("uuid") uploadID := ctx.Query("uploadID") partNumber := ctx.QueryInt("chunkNumber") size := ctx.QueryInt64("size") typeCloudBrain := ctx.QueryInt("type") err := checkTypeCloudBrain(typeCloudBrain) if err != nil { ctx.ServerError("checkTypeCloudBrain failed", err) return } url := "" if typeCloudBrain == models.TypeCloudBrainOne { if size > minio_ext.MinPartSize { ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size)) return } url, err = storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size) if err != nil { ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err)) return } } else { url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, size) if err != nil { ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err)) return } } ctx.JSON(200, map[string]string{ "url": url, }) } func GetObsKey(ctx *context.Context) { uuid := gouuid.NewV4().String() key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/") ctx.JSON(200, map[string]string{ "uuid": uuid, "key": key, "access_key_id": setting.AccessKeyID, "secret_access_key": setting.SecretAccessKey, "server": setting.Endpoint, "bucket": setting.Bucket, }) } func UploadPart(ctx *context.Context) { tmp, err := ctx.Req.Body().String() log.Info(tmp) err = ctx.Req.ParseMultipartForm(100*1024*1024) if err != nil { ctx.Error(http.StatusBadRequest, fmt.Sprintf("ParseMultipartForm failed: %v", err)) return } file, fileHeader, err := ctx.Req.FormFile("file") log.Info(ctx.Req.Form.Get("file")) if err != nil { ctx.Error(http.StatusBadRequest, fmt.Sprintf("FormFile failed: %v", err)) return } log.Info(fileHeader.Filename) etag, err := storage.ObsUploadPart("", "", 1, 1, file) if err != nil { ctx.Error(500, fmt.Sprintf("ObsUploadPart failed: %v", err)) return } ctx.JSON(200, map[string]string{ "etag": etag, }) } func CompleteMultipart(ctx *context.Context) { uuid := ctx.Query("uuid") uploadID := ctx.Query("uploadID") typeCloudBrain := ctx.QueryInt("type") err := checkTypeCloudBrain(typeCloudBrain) if err != nil { ctx.ServerError("checkTypeCloudBrain failed", err) return } fileChunk, err := models.GetFileChunkByUUID(uuid) if err != nil { if models.IsErrFileChunkNotExist(err) { ctx.Error(404) } else { ctx.ServerError("GetFileChunkByUUID", err) } return } if typeCloudBrain == models.TypeCloudBrainOne { _, err = storage.CompleteMultiPartUpload(uuid, uploadID) if err != nil { ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err)) return } } else { err = storage.CompleteObsMultiPartUpload(uuid, uploadID) if err != nil { ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err)) return } } fileChunk.IsUploaded = models.FileUploaded err = models.UpdateFileChunk(fileChunk) if err != nil { ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err)) return } attachment, err := models.InsertAttachment(&models.Attachment{ UUID: uuid, UploaderID: ctx.User.ID, IsPrivate: true, Name: ctx.Query("file_name"), Size: ctx.QueryInt64("size"), DatasetID: ctx.QueryInt64("dataset_id"), Type: typeCloudBrain, }) if err != nil { ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err)) return } if attachment.DatasetID != 0 { if strings.HasSuffix(attachment.Name, ".zip") { err = worker.SendDecompressTask(contexExt.Background(), uuid) if err != nil { log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error()) } else { attachment.DecompressState = models.DecompressStateIng err = models.UpdateAttachment(attachment) if err != nil { log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error()) } } } } ctx.JSON(200, map[string]string{ "result_code": "0", }) } func UpdateMultipart(ctx *context.Context) { uuid := ctx.Query("uuid") partNumber := ctx.QueryInt("chunkNumber") etag := ctx.Query("etag") fileChunk, err := models.GetFileChunkByUUID(uuid) if err != nil { if models.IsErrFileChunkNotExist(err) { ctx.Error(404) } else { ctx.ServerError("GetFileChunkByUUID", err) } return } fileChunk.CompletedParts = append(fileChunk.CompletedParts, strconv.Itoa(partNumber)+"-"+strings.Replace(etag, "\"", "", -1)) err = models.UpdateFileChunk(fileChunk) if err != nil { ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err)) return } ctx.JSON(200, map[string]string{ "result_code": "0", }) } func HandleUnDecompressAttachment() { attachs, err := models.GetUnDecompressAttachments() if err != nil { log.Error("GetUnDecompressAttachments failed:", err.Error()) return } for _, attach := range attachs { err = worker.SendDecompressTask(contexExt.Background(), attach.UUID) if err != nil { log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error()) } else { attach.DecompressState = models.DecompressStateIng err = models.UpdateAttachment(attach) if err != nil { log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error()) } } } return } func QueryAllPublicDataset(ctx *context.Context) { attachs, err := models.GetAllPublicAttachments() if err != nil { ctx.JSON(200, map[string]string{ "result_code": "-1", "error_msg": err.Error(), "data": "", }) return } queryDatasets(ctx, attachs) } func QueryPrivateDataset(ctx *context.Context) { username := ctx.Params(":username") attachs, err := models.GetPrivateAttachments(username) if err != nil { ctx.JSON(200, map[string]string{ "result_code": "-1", "error_msg": err.Error(), "data": "", }) return } for _, attach := range attachs { attach.Name = username } queryDatasets(ctx, attachs) } func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) { var datasets []CloudBrainDataset if len(attachs) == 0 { log.Info("dataset is null") ctx.JSON(200, map[string]string{ "result_code": "0", "error_msg": "", "data": "", }) return } for _, attch := range attachs { has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(attch.UUID)) if err != nil || !has { continue } datasets = append(datasets, CloudBrainDataset{strconv.FormatInt(attch.ID, 10), attch.Attachment.Name, setting.Attachment.Minio.RealPath + setting.Attachment.Minio.Bucket + "/" + setting.Attachment.Minio.BasePath + models.AttachmentRelativePath(attch.UUID) + attch.UUID, attch.Name, attch.CreatedUnix.Format("2006-01-02 03:04:05 PM")}) } data, err := json.Marshal(datasets) if err != nil { log.Error("json.Marshal failed:", err.Error()) ctx.JSON(200, map[string]string{ "result_code": "-1", "error_msg": err.Error(), "data": "", }) return } ctx.JSON(200, map[string]string{ "result_code": "0", "error_msg": "", "data": string(data), }) return } func checkTypeCloudBrain(typeCloudBrain int) error { if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo { log.Error("type error:", typeCloudBrain) return errors.New("type error") } return nil }