attachment.go

// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package repo

import (
	contexExt "context"
	"encoding/json"
	"errors"
	"fmt"
	"mime/multipart"
	"net/http"
	"path"
	"strconv"
	"strings"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/context"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/minio_ext"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
	"code.gitea.io/gitea/modules/upload"
	"code.gitea.io/gitea/modules/worker"

	gouuid "github.com/satori/go.uuid"
)

const (
	// result of decompress
	DecompressSuccess = "0"
	DecompressFailed  = "1"
)

type CloudBrainDataset struct {
	UUID       string `json:"id"`
	Name       string `json:"name"`
	Path       string `json:"place"`
	UserName   string `json:"provider"`
	CreateTime string `json:"created_at"`
}

type UploadForm struct {
	UploadID   string         `form:"uploadId"`
	UuID       string         `form:"uuid"`
	PartSize   int64          `form:"size"`
	Offset     int64          `form:"offset"`
	PartNumber int            `form:"chunkNumber"`
	PartFile   multipart.File `form:"file"`
}

// RenderAttachmentSettings exposes the attachment settings to templates.
func RenderAttachmentSettings(ctx *context.Context) {
	renderAttachmentSettings(ctx)
}

func renderAttachmentSettings(ctx *context.Context) {
	ctx.Data["IsAttachmentEnabled"] = setting.Attachment.Enabled
	ctx.Data["AttachmentStoreType"] = setting.Attachment.StoreType
	ctx.Data["AttachmentAllowedTypes"] = setting.Attachment.AllowedTypes
	ctx.Data["AttachmentMaxSize"] = setting.Attachment.MaxSize
	ctx.Data["AttachmentMaxFiles"] = setting.Attachment.MaxFiles
}
// UploadAttachment response for uploading issue's attachment
func UploadAttachment(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	file, header, err := ctx.Req.FormFile("file")
	if err != nil {
		ctx.Error(500, fmt.Sprintf("FormFile: %v", err))
		return
	}
	defer file.Close()

	buf := make([]byte, 1024)
	n, _ := file.Read(buf)
	if n > 0 {
		buf = buf[:n]
	}

	err = upload.VerifyAllowedContentType(buf, strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	datasetID, _ := strconv.ParseInt(ctx.Req.FormValue("dataset_id"), 10, 64)
	attach, err := models.NewAttachment(&models.Attachment{
		IsPrivate:  true,
		UploaderID: ctx.User.ID,
		Name:       header.Filename,
		DatasetID:  datasetID,
	}, buf, file)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("NewAttachment: %v", err))
		return
	}

	log.Trace("New attachment uploaded: %s", attach.UUID)
	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
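// UpdatePublicAttachment toggles the is_private flag of the attachment
// identified by the "file" query parameter (an attachment UUID).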
func UpdatePublicAttachment(ctx *context.Context) {
	file := ctx.Query("file")
	isPrivate, _ := strconv.ParseBool(ctx.Query("is_private"))
	attach, err := models.GetAttachmentByUUID(file)
	if err != nil {
		ctx.Error(404, err.Error())
		return
	}

	attach.IsPrivate = isPrivate
	models.UpdateAttachment(attach)
}

// DeleteAttachment response for deleting issue's attachment
func DeleteAttachment(ctx *context.Context) {
	file := ctx.Query("file")
	attach, err := models.GetAttachmentByUUID(file)
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	if !ctx.IsSigned || (ctx.User.ID != attach.UploaderID) {
		ctx.Error(403)
		return
	}
	err = models.DeleteAttachment(attach, false)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err))
		return
	}
	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
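// DownloadUserIsOrgOrCollaboration reports whether the current user may download the
// attachment because they belong to the organization that owns the attachment's dataset
// repository, or because they are a collaborator on that repository.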
func DownloadUserIsOrgOrCollaboration(ctx *context.Context, attach *models.Attachment) bool {
	dataset, err := models.GetDatasetByID(attach.DatasetID)
	if err != nil {
		log.Info("query dataset error")
	} else {
		repo, err := models.GetRepositoryByID(dataset.RepoID)
		if err != nil {
			log.Info("query repo error.")
		} else {
			repo.GetOwner()
			if ctx.User != nil {
				if repo.Owner.IsOrganization() {
					if repo.Owner.IsUserPartOfOrg(ctx.User.ID) {
						log.Info("org user may visit the attach.")
						return true
					}
				}

				isCollaborator, _ := repo.IsCollaborator(ctx.User.ID)
				if isCollaborator {
					log.Info("Collaborator user may visit the attach.")
					return true
				}
			}
		}
	}

	return false
}
// GetAttachment serves attachments
func GetAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
		}
		return
	}

	repository, unitType, err := attach.LinkedRepository()
	if err != nil {
		ctx.ServerError("LinkedRepository", err)
		return
	}

	dataSet, err := attach.LinkedDataSet()
	if err != nil {
		ctx.ServerError("LinkedDataSet", err)
		return
	}

	if repository == nil && dataSet != nil {
		repository, _ = models.GetRepositoryByID(dataSet.RepoID)
		unitType = models.UnitTypeDatasets
	}

	if repository == nil { // If not linked
		// Block private attachments unless the requester is the uploader or may access
		// them through the owning organization or as a repository collaborator.
		if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate && !DownloadUserIsOrgOrCollaboration(ctx, attach) {
			ctx.Error(http.StatusNotFound)
			return
		}
	} else { // If we have the repository we check access
		perm, errPermission := models.GetUserRepoPermission(repository, ctx.User)
		if errPermission != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", errPermission.Error())
			return
		}
		if !perm.CanRead(unitType) {
			ctx.Error(http.StatusNotFound)
			return
		}
	}

	if dataSet != nil {
		isPermit, err := models.GetUserDataSetPermission(dataSet, ctx.User)
		if err != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserDataSetPermission", err.Error())
			return
		}
		if !isPermit {
			ctx.Error(http.StatusNotFound)
			return
		}
	}

	// If we have matched and access to release or issue
	if setting.Attachment.StoreType == storage.MinioStorageType {
		url := ""
		if typeCloudBrain == models.TypeCloudBrainOne {
			url, err = storage.Attachments.PresignedGetURL(setting.Attachment.Minio.BasePath+attach.RelativePath(), attach.Name)
			if err != nil {
				ctx.ServerError("PresignedGetURL", err)
				return
			}
		} else {
			url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name)
			if err != nil {
				ctx.ServerError("ObsGetPreSignedUrl", err)
				return
			}
		}
		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}

		http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently)
	} else {
		fr, err := storage.Attachments.Open(attach.RelativePath())
		if err != nil {
			ctx.ServerError("Open", err)
			return
		}
		defer fr.Close()

		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}

		if err = ServeData(ctx, attach.Name, fr); err != nil {
			ctx.ServerError("ServeData", err)
			return
		}
	}
}
func increaseDownloadCount(attach *models.Attachment, dataSet *models.Dataset) error {
	if err := attach.IncreaseDownloadCount(); err != nil {
		return err
	}

	if dataSet != nil {
		if err := models.IncreaseDownloadCount(dataSet.ID); err != nil {
			return err
		}
	}

	return nil
}

// GetPresignedPutObjectURL gets a presigned url for put object
func GetPresignedPutObjectURL(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	err := upload.VerifyFileType(ctx.Params("file_type"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	if setting.Attachment.StoreType == storage.MinioStorageType {
		uuid := gouuid.NewV4().String()
		url, err := storage.Attachments.PresignedPutURL(models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("PresignedPutURL", err)
			return
		}
		ctx.JSON(200, map[string]string{
			"uuid": uuid,
			"url":  url,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}
// AddAttachment response for add attachment record
func AddAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	uuid := ctx.Query("uuid")
	has := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("HasObject", err)
			return
		}
	} else {
		has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + uuid)
		if err != nil {
			ctx.ServerError("ObsHasObject", err)
			return
		}
	}

	if !has {
		ctx.Error(404, "attachment has not been uploaded")
		return
	}

	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:       uuid,
		UploaderID: ctx.User.ID,
		IsPrivate:  true,
		Name:       ctx.Query("file_name"),
		Size:       ctx.QueryInt64("size"),
		DatasetID:  ctx.QueryInt64("dataset_id"),
		Type:       typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}

	if attachment.DatasetID != 0 {
		if strings.HasSuffix(attachment.Name, ".zip") {
			if typeCloudBrain == models.TypeCloudBrainOne {
				err = worker.SendDecompressTask(contexExt.Background(), uuid)
				if err != nil {
					log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
				} else {
					attachment.DecompressState = models.DecompressStateIng
					err = models.UpdateAttachment(attachment)
					if err != nil {
						log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
					}
				}
			}
			//todo:decompress type_two
		}
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
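// UpdateAttachmentDecompressState records the outcome of a decompress task: "result"
// is DecompressSuccess or DecompressFailed, and the attachment identified by "uuid"
// has its DecompressState updated accordingly.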
func UpdateAttachmentDecompressState(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	result := ctx.Query("result")

	attach, err := models.GetAttachmentByUUID(uuid)
	if err != nil {
		log.Error("GetAttachmentByUUID(%s) failed:%s", uuid, err.Error())
		return
	}

	if result == DecompressSuccess {
		attach.DecompressState = models.DecompressStateDone
	} else if result == DecompressFailed {
		attach.DecompressState = models.DecompressStateFailed
	} else {
		log.Error("result is error: %s", result)
		return
	}

	err = models.UpdateAttachment(attach)
	if err != nil {
		log.Error("UpdateAttachment(%s) failed:%s", uuid, err.Error())
		return
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
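// The chunked upload flow implemented by the handlers below is, roughly:
//   1. GetSuccessChunks      - look up an earlier upload by file MD5 and return the parts already stored
//   2. NewMultipart          - create a multipart upload (MinIO or OBS) and record a FileChunk row
//   3. GetMultipartUploadUrl - hand out a presigned URL for each part
//   4. UpdateMultipart       - record each completed part's ETag
//   5. CompleteMultipart     - finish the multipart upload and insert the Attachment record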
func GetSuccessChunks(ctx *context.Context) {
	fileMD5 := ctx.Query("md5")
	typeCloudBrain := ctx.QueryInt("type")
	var chunks string

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
		} else {
			ctx.ServerError("GetFileChunkByMD5", err)
		}
		return
	}

	isExist := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID))
		if err != nil {
			ctx.ServerError("HasObject failed", err)
			return
		}
	} else {
		isExist, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(fileChunk.UUID) + "/" + fileChunk.UUID)
		if err != nil {
			ctx.ServerError("ObsHasObject failed", err)
			return
		}
	}

	if isExist {
		if fileChunk.IsUploaded == models.FileNotUploaded {
			log.Info("the file has been uploaded but not recorded")
			fileChunk.IsUploaded = models.FileUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed: %s", err.Error())
			}
		}
	} else {
		if fileChunk.IsUploaded == models.FileUploaded {
			log.Info("the file has been recorded but not uploaded")
			fileChunk.IsUploaded = models.FileNotUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed: %s", err.Error())
			}
		}

		if typeCloudBrain == models.TypeCloudBrainOne {
			chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID)
			if err != nil {
				log.Error("GetPartInfos failed:%v", err.Error())
			}
		} else {
			chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID)
			if err != nil {
				log.Error("GetObsPartInfos failed:%v", err.Error())
			}
		}

		if err != nil {
			models.DeleteFileChunk(fileChunk)
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
			return
		}
	}

	var attachID int64
	attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			attachID = 0
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
			return
		}
	} else {
		attachID = attach.ID
	}

	if attach == nil {
		ctx.JSON(200, map[string]string{
			"uuid":        fileChunk.UUID,
			"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
			"uploadID":    fileChunk.UploadID,
			"chunks":      chunks,
			"attachID":    "0",
			"datasetID":   "0",
			"fileName":    "",
			"datasetName": "",
		})
		return
	}

	dataset, err := models.GetDatasetByID(attach.DatasetID)
	if err != nil {
		ctx.ServerError("GetDatasetByID", err)
		return
	}

	ctx.JSON(200, map[string]string{
		"uuid":        fileChunk.UUID,
		"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
		"uploadID":    fileChunk.UploadID,
		"chunks":      chunks,
		"attachID":    strconv.Itoa(int(attachID)),
		"datasetID":   strconv.Itoa(int(attach.DatasetID)),
		"fileName":    attach.Name,
		"datasetName": dataset.Title,
	})
}
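// NewMultipart starts a multipart upload. It validates the requested chunk count and
// file size, creates the upload in MinIO or OBS depending on "type", and records a
// FileChunk row carrying the MD5 and uploadID so the upload can be resumed later.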
func NewMultipart(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	typeCloudBrain := ctx.QueryInt("type")
	err = checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileName := ctx.Query("file_name")

	if setting.Attachment.StoreType == storage.MinioStorageType {
		totalChunkCounts := ctx.QueryInt("totalChunkCounts")
		if totalChunkCounts > minio_ext.MaxPartsCount {
			ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
			return
		}

		fileSize := ctx.QueryInt64("size")
		if fileSize > minio_ext.MaxMultipartPutObjectSize {
			ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
			return
		}

		uuid := gouuid.NewV4().String()
		var uploadID string
		if typeCloudBrain == models.TypeCloudBrainOne {
			uploadID, err = storage.NewMultiPartUpload(uuid)
			if err != nil {
				ctx.ServerError("NewMultipart", err)
				return
			}
		} else {
			uploadID, err = storage.NewObsMultiPartUpload(uuid, fileName)
			if err != nil {
				ctx.ServerError("NewObsMultiPartUpload", err)
				return
			}
		}

		_, err = models.InsertFileChunk(&models.FileChunk{
			UUID:        uuid,
			UserID:      ctx.User.ID,
			UploadID:    uploadID,
			Md5:         ctx.Query("md5"),
			Size:        fileSize,
			TotalChunks: totalChunkCounts,
			Type:        typeCloudBrain,
		})
		if err != nil {
			ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
			return
		}

		ctx.JSON(200, map[string]string{
			"uuid":     uuid,
			"uploadID": uploadID,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}
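// GetMultipartUploadUrl returns a presigned URL for uploading one part (identified by
// "chunkNumber") of an existing multipart upload.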
func GetMultipartUploadUrl(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	partNumber := ctx.QueryInt("chunkNumber")
	size := ctx.QueryInt64("size")
	fileName := ctx.Query("file_name")

	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	url := ""
	if typeCloudBrain == models.TypeCloudBrainOne {
		if size > minio_ext.MinPartSize {
			ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
			return
		}

		url, err = storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
			return
		}
	} else {
		url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, fileName)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
			return
		}
	}

	ctx.JSON(200, map[string]string{
		"url": url,
	})
}
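// GetObsKey generates a fresh UUID and the OBS object key derived from it, returning
// them together with the configured OBS endpoint, bucket, and credentials.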
func GetObsKey(ctx *context.Context) {
	uuid := gouuid.NewV4().String()
	key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")

	ctx.JSON(200, map[string]string{
		"uuid":              uuid,
		"key":               key,
		"access_key_id":     setting.AccessKeyID,
		"secret_access_key": setting.SecretAccessKey,
		"server":            setting.Endpoint,
		"bucket":            setting.Bucket,
	})
}
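// CompleteMultipart finishes the multipart upload, marks the FileChunk row as uploaded,
// inserts the Attachment record, and (for CloudBrain type one) queues a decompress task
// for zip archives that belong to a dataset.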
func CompleteMultipart(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetFileChunkByUUID(uuid)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetFileChunkByUUID", err)
		}
		return
	}

	if typeCloudBrain == models.TypeCloudBrainOne {
		_, err = storage.CompleteMultiPartUpload(uuid, uploadID)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
			return
		}
	} else {
		err = storage.CompleteObsMultiPartUpload(uuid, uploadID, fileName)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
			return
		}
	}

	fileChunk.IsUploaded = models.FileUploaded

	err = models.UpdateFileChunk(fileChunk)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
		return
	}

	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:       uuid,
		UploaderID: ctx.User.ID,
		IsPrivate:  true,
		Name:       fileName,
		Size:       ctx.QueryInt64("size"),
		DatasetID:  ctx.QueryInt64("dataset_id"),
		Type:       typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}

	if attachment.DatasetID != 0 {
		if typeCloudBrain == models.TypeCloudBrainOne {
			if strings.HasSuffix(attachment.Name, ".zip") {
				err = worker.SendDecompressTask(contexExt.Background(), uuid)
				if err != nil {
					log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
				} else {
					attachment.DecompressState = models.DecompressStateIng
					err = models.UpdateAttachment(attachment)
					if err != nil {
						log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
					}
				}
			}
		}
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
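// UpdateMultipart records one completed part by appending "<chunkNumber>-<etag>" to the
// FileChunk's CompletedParts list.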
func UpdateMultipart(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	partNumber := ctx.QueryInt("chunkNumber")
	etag := ctx.Query("etag")

	fileChunk, err := models.GetFileChunkByUUID(uuid)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetFileChunkByUUID", err)
		}
		return
	}

	fileChunk.CompletedParts = append(fileChunk.CompletedParts, strconv.Itoa(partNumber)+"-"+strings.Replace(etag, "\"", "", -1))

	err = models.UpdateFileChunk(fileChunk)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
		return
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
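// HandleUnDecompressAttachment re-queues decompress tasks for attachments that have not
// been decompressed yet and marks each of them as in progress.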
func HandleUnDecompressAttachment() {
	attachs, err := models.GetUnDecompressAttachments()
	if err != nil {
		log.Error("GetUnDecompressAttachments failed: %s", err.Error())
		return
	}

	for _, attach := range attachs {
		err = worker.SendDecompressTask(contexExt.Background(), attach.UUID)
		if err != nil {
			log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
		} else {
			attach.DecompressState = models.DecompressStateIng
			err = models.UpdateAttachment(attach)
			if err != nil {
				log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
			}
		}
	}
}
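// QueryAllPublicDataset lists every public attachment as a CloudBrain dataset entry.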
func QueryAllPublicDataset(ctx *context.Context) {
	attachs, err := models.GetAllPublicAttachments()
	if err != nil {
		ctx.JSON(200, map[string]string{
			"result_code": "-1",
			"error_msg":   err.Error(),
			"data":        "",
		})
		return
	}

	queryDatasets(ctx, attachs)
}

// QueryPrivateDataset lists the private attachments of the user named in the URL.
func QueryPrivateDataset(ctx *context.Context) {
	username := ctx.Params(":username")
	attachs, err := models.GetPrivateAttachments(username)
	if err != nil {
		ctx.JSON(200, map[string]string{
			"result_code": "-1",
			"error_msg":   err.Error(),
			"data":        "",
		})
		return
	}

	for _, attach := range attachs {
		attach.Name = username
	}

	queryDatasets(ctx, attachs)
}
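// queryDatasets converts the attachments that actually exist in storage into
// CloudBrainDataset entries and returns them as a JSON string in the "data" field.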
func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) {
	var datasets []CloudBrainDataset
	if len(attachs) == 0 {
		log.Info("dataset is null")
		ctx.JSON(200, map[string]string{
			"result_code": "0",
			"error_msg":   "",
			"data":        "",
		})
		return
	}

	for _, attch := range attachs {
		has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(attch.UUID))
		if err != nil || !has {
			continue
		}

		datasets = append(datasets, CloudBrainDataset{
			strconv.FormatInt(attch.ID, 10),
			attch.Attachment.Name,
			setting.Attachment.Minio.RealPath +
				setting.Attachment.Minio.Bucket + "/" +
				setting.Attachment.Minio.BasePath +
				models.AttachmentRelativePath(attch.UUID) +
				attch.UUID,
			attch.Name,
			attch.CreatedUnix.Format("2006-01-02 03:04:05 PM"),
		})
	}

	data, err := json.Marshal(datasets)
	if err != nil {
		log.Error("json.Marshal failed: %s", err.Error())
		ctx.JSON(200, map[string]string{
			"result_code": "-1",
			"error_msg":   err.Error(),
			"data":        "",
		})
		return
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
		"error_msg":   "",
		"data":        string(data),
	})
}
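// checkTypeCloudBrain validates that the "type" query value is one of the two
// supported CloudBrain types.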
func checkTypeCloudBrain(typeCloudBrain int) error {
	if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo {
		log.Error("type error: %d", typeCloudBrain)
		return errors.New("type error")
	}
	return nil
}