
attachment.go 25 kB

// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package repo

import (
    contexExt "context"
    "encoding/json"
    "errors"
    "fmt"
    "mime/multipart"
    "net/http"
    "path"
    "strconv"
    "strings"

    "code.gitea.io/gitea/models"
    "code.gitea.io/gitea/modules/context"
    "code.gitea.io/gitea/modules/labelmsg"
    "code.gitea.io/gitea/modules/log"
    "code.gitea.io/gitea/modules/minio_ext"
    "code.gitea.io/gitea/modules/setting"
    "code.gitea.io/gitea/modules/storage"
    "code.gitea.io/gitea/modules/upload"
    "code.gitea.io/gitea/modules/worker"

    gouuid "github.com/satori/go.uuid"
)

const (
    // result of decompress
    DecompressSuccess = "0"
    DecompressFailed  = "1"
)

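// CloudBrainDataset is the JSON representation of an attachment returned by the cloudbrain dataset queries.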
type CloudBrainDataset struct {
    UUID       string `json:"id"`
    Name       string `json:"name"`
    Path       string `json:"place"`
    UserName   string `json:"provider"`
    CreateTime string `json:"created_at"`
}

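// UploadForm holds the form fields bound for a single chunk of a multipart upload.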
type UploadForm struct {
    UploadID   string         `form:"uploadId"`
    UuID       string         `form:"uuid"`
    PartSize   int64          `form:"size"`
    Offset     int64          `form:"offset"`
    PartNumber int            `form:"chunkNumber"`
    PartFile   multipart.File `form:"file"`
}

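// RenderAttachmentSettings exposes the attachment settings to the rendering context.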
func RenderAttachmentSettings(ctx *context.Context) {
    renderAttachmentSettings(ctx)
}

func renderAttachmentSettings(ctx *context.Context) {
    ctx.Data["IsAttachmentEnabled"] = setting.Attachment.Enabled
    ctx.Data["AttachmentStoreType"] = setting.Attachment.StoreType
    ctx.Data["AttachmentAllowedTypes"] = setting.Attachment.AllowedTypes
    ctx.Data["AttachmentMaxSize"] = setting.Attachment.MaxSize
    ctx.Data["AttachmentMaxFiles"] = setting.Attachment.MaxFiles
}

// UploadAttachment handles uploading an issue's attachment
func UploadAttachment(ctx *context.Context) {
    if !setting.Attachment.Enabled {
        ctx.Error(404, "attachment is not enabled")
        return
    }

    file, header, err := ctx.Req.FormFile("file")
    if err != nil {
        ctx.Error(500, fmt.Sprintf("FormFile: %v", err))
        return
    }
    defer file.Close()

    buf := make([]byte, 1024)
    n, _ := file.Read(buf)
    if n > 0 {
        buf = buf[:n]
    }

    err = upload.VerifyAllowedContentType(buf, strings.Split(setting.Attachment.AllowedTypes, ","))
    if err != nil {
        ctx.Error(400, err.Error())
        return
    }

    datasetID, _ := strconv.ParseInt(ctx.Req.FormValue("dataset_id"), 10, 64)
    attach, err := models.NewAttachment(&models.Attachment{
        IsPrivate:  true,
        UploaderID: ctx.User.ID,
        Name:       header.Filename,
        DatasetID:  datasetID,
    }, buf, file)
    if err != nil {
        ctx.Error(500, fmt.Sprintf("NewAttachment: %v", err))
        return
    }

    log.Trace("New attachment uploaded: %s", attach.UUID)
    ctx.JSON(200, map[string]string{
        "uuid": attach.UUID,
    })
}

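// UpdatePublicAttachment updates the is_private flag of the attachment identified by the "file" query parameter.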
func UpdatePublicAttachment(ctx *context.Context) {
    file := ctx.Query("file")
    isPrivate, _ := strconv.ParseBool(ctx.Query("is_private"))
    attach, err := models.GetAttachmentByUUID(file)
    if err != nil {
        ctx.Error(404, err.Error())
        return
    }

    attach.IsPrivate = isPrivate
    models.UpdateAttachment(attach)
}

// DeleteAttachment handles deleting an issue's attachment
func DeleteAttachment(ctx *context.Context) {
    file := ctx.Query("file")
    attach, err := models.GetAttachmentByUUID(file)
    if err != nil {
        ctx.Error(400, err.Error())
        return
    }

    // issue 214: mod del-dataset permission
    if !models.CanDelAttachment(ctx.IsSigned, ctx.User, attach) {
        ctx.Error(403)
        return
    }

    err = models.DeleteAttachment(attach, true)
    if err != nil {
        ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err))
        return
    }

    attachjson, _ := json.Marshal(attach)
    labelmsg.SendDeleteAttachToLabelSys(string(attachjson))

    DeleteAllUnzipFile(attach, "")
    TimeingCountData()

    _, err = models.DeleteFileChunkById(attach.UUID)
    if err != nil {
        ctx.Error(500, fmt.Sprintf("DeleteFileChunkById: %v", err))
        return
    }

    ctx.JSON(200, map[string]string{
        "uuid": attach.UUID,
    })
}

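// DownloadUserIsOrgOrCollaboration reports whether the current user may download the attachment because they
// belong to the organization that owns the linked repository or are a collaborator of that repository.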
func DownloadUserIsOrgOrCollaboration(ctx *context.Context, attach *models.Attachment) bool {
    dataset, err := models.GetDatasetByID(attach.DatasetID)
    if err != nil {
        log.Info("query dataset error")
    } else {
        repo, err := models.GetRepositoryByID(dataset.RepoID)
        if err != nil {
            log.Info("query repo error.")
        } else {
            repo.GetOwner()
            if ctx.User != nil {
                if repo.Owner.IsOrganization() {
                    if repo.Owner.IsUserPartOfOrg(ctx.User.ID) {
                        log.Info("org user may visit the attach.")
                        return true
                    }
                }

                isCollaborator, _ := repo.IsCollaborator(ctx.User.ID)
                if isCollaborator {
                    log.Info("Collaborator user may visit the attach.")
                    return true
                }
            }
        }
    }
    return false
}

// GetAttachment serves attachments
func GetAttachment(ctx *context.Context) {
    typeCloudBrain := ctx.QueryInt("type")
    err := checkTypeCloudBrain(typeCloudBrain)
    if err != nil {
        ctx.ServerError("checkTypeCloudBrain failed", err)
        return
    }

    attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
    if err != nil {
        if models.IsErrAttachmentNotExist(err) {
            ctx.Error(404)
        } else {
            ctx.ServerError("GetAttachmentByUUID", err)
        }
        return
    }

    repository, unitType, err := attach.LinkedRepository()
    if err != nil {
        ctx.ServerError("LinkedRepository", err)
        return
    }

    dataSet, err := attach.LinkedDataSet()
    if err != nil {
        ctx.ServerError("LinkedDataSet", err)
        return
    }

    if repository == nil && dataSet != nil {
        repository, _ = models.GetRepositoryByID(dataSet.RepoID)
        unitType = models.UnitTypeDatasets
    }

    if repository == nil { // If not linked
        // We block access if the requester is not the uploader, the attachment is private,
        // and the requester is neither an organization member nor a collaborator.
        if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate && !DownloadUserIsOrgOrCollaboration(ctx, attach) {
            ctx.Error(http.StatusNotFound)
            return
        }
    } else { // If we have the repository we check access
        perm, errPermission := models.GetUserRepoPermission(repository, ctx.User)
        if errPermission != nil {
            ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", errPermission.Error())
            return
        }
        if !perm.CanRead(unitType) {
            ctx.Error(http.StatusNotFound)
            return
        }
    }

    if dataSet != nil {
        isPermit, err := models.GetUserDataSetPermission(dataSet, ctx.User)
        if err != nil {
            ctx.Error(http.StatusInternalServerError, "GetUserDataSetPermission", err.Error())
            return
        }
        if !isPermit {
            ctx.Error(http.StatusNotFound)
            return
        }
    }

    // If we have matched and access to release or issue
    if setting.Attachment.StoreType == storage.MinioStorageType {
        url := ""
        if typeCloudBrain == models.TypeCloudBrainOne {
            url, err = storage.Attachments.PresignedGetURL(setting.Attachment.Minio.BasePath+attach.RelativePath(), attach.Name)
            if err != nil {
                ctx.ServerError("PresignedGetURL", err)
                return
            }
        } else {
            url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name)
            if err != nil {
                ctx.ServerError("ObsGetPreSignedUrl", err)
                return
            }
        }

        if err = increaseDownloadCount(attach, dataSet); err != nil {
            ctx.ServerError("Update", err)
            return
        }

        http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently)
    } else {
        fr, err := storage.Attachments.Open(attach.RelativePath())
        if err != nil {
            ctx.ServerError("Open", err)
            return
        }
        defer fr.Close()

        if err = increaseDownloadCount(attach, dataSet); err != nil {
            ctx.ServerError("Update", err)
            return
        }

        if err = ServeData(ctx, attach.Name, fr); err != nil {
            ctx.ServerError("ServeData", err)
            return
        }
    }
}

func increaseDownloadCount(attach *models.Attachment, dataSet *models.Dataset) error {
    if err := attach.IncreaseDownloadCount(); err != nil {
        return err
    }

    if dataSet != nil {
        if err := models.IncreaseDownloadCount(dataSet.ID); err != nil {
            return err
        }
    }

    return nil
}

// GetPresignedPutObjectURL returns a presigned URL for uploading an object
func GetPresignedPutObjectURL(ctx *context.Context) {
    if !setting.Attachment.Enabled {
        ctx.Error(404, "attachment is not enabled")
        return
    }

    err := upload.VerifyFileType(ctx.Params("file_type"), strings.Split(setting.Attachment.AllowedTypes, ","))
    if err != nil {
        ctx.Error(400, err.Error())
        return
    }

    if setting.Attachment.StoreType == storage.MinioStorageType {
        uuid := gouuid.NewV4().String()
        url, err := storage.Attachments.PresignedPutURL(models.AttachmentRelativePath(uuid))
        if err != nil {
            ctx.ServerError("PresignedPutURL", err)
            return
        }
        ctx.JSON(200, map[string]string{
            "uuid": uuid,
            "url":  url,
        })
    } else {
        ctx.Error(404, "storage type is not enabled")
        return
    }
}

// AddAttachment handles adding an attachment record for an object that has already been uploaded
func AddAttachment(ctx *context.Context) {
    typeCloudBrain := ctx.QueryInt("type")
    err := checkTypeCloudBrain(typeCloudBrain)
    if err != nil {
        ctx.ServerError("checkTypeCloudBrain failed", err)
        return
    }

    uuid := ctx.Query("uuid")
    has := false
    if typeCloudBrain == models.TypeCloudBrainOne {
        has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid))
        if err != nil {
            ctx.ServerError("HasObject", err)
            return
        }
    } else {
        has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + uuid)
        if err != nil {
            ctx.ServerError("ObsHasObject", err)
            return
        }
    }

    if !has {
        ctx.Error(404, "attachment has not been uploaded")
        return
    }

    attachment, err := models.InsertAttachment(&models.Attachment{
        UUID:       uuid,
        UploaderID: ctx.User.ID,
        IsPrivate:  true,
        Name:       ctx.Query("file_name"),
        Size:       ctx.QueryInt64("size"),
        DatasetID:  ctx.QueryInt64("dataset_id"),
        Type:       typeCloudBrain,
    })
    if err != nil {
        ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
        return
    }

    if attachment.DatasetID != 0 {
        if isCanDecompress(attachment.Name) {
            if typeCloudBrain == models.TypeCloudBrainOne {
                err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
                if err != nil {
                    log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
                } else {
                    attachment.DecompressState = models.DecompressStateIng
                    err = models.UpdateAttachment(attachment)
                    if err != nil {
                        log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
                    }
                }
            }
            // todo: decompress type_two
        }
    }

    ctx.JSON(200, map[string]string{
        "result_code": "0",
    })
}

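// isCanDecompress reports whether the file name has an archive suffix the decompress worker can handle.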
func isCanDecompress(name string) bool {
    if strings.HasSuffix(name, ".zip") || strings.HasSuffix(name, ".tar.gz") || strings.HasSuffix(name, ".tgz") {
        return true
    }
    return false
}

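// UpdateAttachmentDecompressState records the result reported by the decompress worker and notifies the label system.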
func UpdateAttachmentDecompressState(ctx *context.Context) {
    uuid := ctx.Query("uuid")
    result := ctx.Query("result")

    attach, err := models.GetAttachmentByUUID(uuid)
    if err != nil {
        log.Error("GetAttachmentByUUID(%s) failed:%s", uuid, err.Error())
        return
    }

    if result == DecompressSuccess {
        attach.DecompressState = models.DecompressStateDone
    } else if result == DecompressFailed {
        attach.DecompressState = models.DecompressStateFailed
    } else {
        log.Error("result is error: %s", result)
        return
    }

    err = models.UpdateAttachment(attach)
    if err != nil {
        log.Error("UpdateAttachment(%s) failed:%s", uuid, err.Error())
        return
    }

    log.Info("start to send msg to labelsystem")
    dataset, _ := models.GetDatasetByID(attach.DatasetID)

    labelMap := make(map[string]string)
    labelMap["UUID"] = uuid
    labelMap["Type"] = fmt.Sprint(attach.Type)
    labelMap["UploaderID"] = fmt.Sprint(attach.UploaderID)
    labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
    labelMap["AttachName"] = attach.Name
    attachjson, _ := json.Marshal(labelMap)
    labelmsg.SendAddAttachToLabelSys(string(attachjson))
    log.Info("end to send msg to labelsystem")

    ctx.JSON(200, map[string]string{
        "result_code": "0",
    })
}

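// GetSuccessChunks looks up a resumable upload by file MD5 and user, reconciles the recorded state with the
// object storage, and returns the uploaded chunks together with the related attachment and dataset info.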
func GetSuccessChunks(ctx *context.Context) {
    fileMD5 := ctx.Query("md5")
    typeCloudBrain := ctx.QueryInt("type")
    var chunks string

    err := checkTypeCloudBrain(typeCloudBrain)
    if err != nil {
        ctx.ServerError("checkTypeCloudBrain failed", err)
        return
    }

    fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
    if err != nil {
        if models.IsErrFileChunkNotExist(err) {
            ctx.JSON(200, map[string]string{
                "uuid":     "",
                "uploaded": "0",
                "uploadID": "",
                "chunks":   "",
            })
        } else {
            ctx.ServerError("GetFileChunkByMD5", err)
        }
        return
    }

    isExist := false
    if typeCloudBrain == models.TypeCloudBrainOne {
        isExist, err = storage.Attachments.HasObject(models.AttachmentRelativePath(fileChunk.UUID))
        if err != nil {
            ctx.ServerError("HasObject failed", err)
            return
        }
    } else {
        isExist, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(fileChunk.UUID) + "/" + fileChunk.UUID)
        if err != nil {
            ctx.ServerError("ObsHasObject failed", err)
            return
        }
    }

    if isExist {
        if fileChunk.IsUploaded == models.FileNotUploaded {
            log.Info("the file has been uploaded but not recorded")
            fileChunk.IsUploaded = models.FileUploaded
            if err = models.UpdateFileChunk(fileChunk); err != nil {
                log.Error("UpdateFileChunk failed: %s", err.Error())
            }
        }
    } else {
        if fileChunk.IsUploaded == models.FileUploaded {
            log.Info("the file has been recorded but not uploaded")
            fileChunk.IsUploaded = models.FileNotUploaded
            if err = models.UpdateFileChunk(fileChunk); err != nil {
                log.Error("UpdateFileChunk failed: %s", err.Error())
            }
        }

        if typeCloudBrain == models.TypeCloudBrainOne {
            chunks, err = storage.GetPartInfos(fileChunk.UUID, fileChunk.UploadID)
            if err != nil {
                log.Error("GetPartInfos failed:%v", err.Error())
            }
        } else {
            chunks, err = storage.GetObsPartInfos(fileChunk.UUID, fileChunk.UploadID)
            if err != nil {
                log.Error("GetObsPartInfos failed:%v", err.Error())
            }
        }
        if err != nil {
            models.DeleteFileChunk(fileChunk)
            ctx.JSON(200, map[string]string{
                "uuid":     "",
                "uploaded": "0",
                "uploadID": "",
                "chunks":   "",
            })
            return
        }
    }

    var attachID int64
    attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
    if err != nil {
        if models.IsErrAttachmentNotExist(err) {
            attachID = 0
        } else {
            ctx.ServerError("GetAttachmentByUUID", err)
            return
        }
    } else {
        attachID = attach.ID
    }

    if attach == nil {
        ctx.JSON(200, map[string]string{
            "uuid":        fileChunk.UUID,
            "uploaded":    strconv.Itoa(fileChunk.IsUploaded),
            "uploadID":    fileChunk.UploadID,
            "chunks":      chunks,
            "attachID":    "0",
            "datasetID":   "0",
            "fileName":    "",
            "datasetName": "",
        })
        return
    }

    dataset, err := models.GetDatasetByID(attach.DatasetID)
    if err != nil {
        ctx.ServerError("GetDatasetByID", err)
        return
    }

    ctx.JSON(200, map[string]string{
        "uuid":        fileChunk.UUID,
        "uploaded":    strconv.Itoa(fileChunk.IsUploaded),
        "uploadID":    fileChunk.UploadID,
        "chunks":      chunks,
        "attachID":    strconv.Itoa(int(attachID)),
        "datasetID":   strconv.Itoa(int(attach.DatasetID)),
        "fileName":    attach.Name,
        "datasetName": dataset.Title,
    })
}

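// NewMultipart initiates a multipart upload in the configured storage and records a FileChunk entry for it.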
func NewMultipart(ctx *context.Context) {
    if !setting.Attachment.Enabled {
        ctx.Error(404, "attachment is not enabled")
        return
    }

    err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
    if err != nil {
        ctx.Error(400, err.Error())
        return
    }

    typeCloudBrain := ctx.QueryInt("type")
    err = checkTypeCloudBrain(typeCloudBrain)
    if err != nil {
        ctx.ServerError("checkTypeCloudBrain failed", err)
        return
    }

    fileName := ctx.Query("file_name")

    if setting.Attachment.StoreType == storage.MinioStorageType {
        totalChunkCounts := ctx.QueryInt("totalChunkCounts")
        if totalChunkCounts > minio_ext.MaxPartsCount {
            ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
            return
        }

        fileSize := ctx.QueryInt64("size")
        if fileSize > minio_ext.MaxMultipartPutObjectSize {
            ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
            return
        }

        uuid := gouuid.NewV4().String()
        var uploadID string
        if typeCloudBrain == models.TypeCloudBrainOne {
            uploadID, err = storage.NewMultiPartUpload(uuid)
            if err != nil {
                ctx.ServerError("NewMultipart", err)
                return
            }
        } else {
            uploadID, err = storage.NewObsMultiPartUpload(uuid, fileName)
            if err != nil {
                ctx.ServerError("NewObsMultiPartUpload", err)
                return
            }
        }

        _, err = models.InsertFileChunk(&models.FileChunk{
            UUID:        uuid,
            UserID:      ctx.User.ID,
            UploadID:    uploadID,
            Md5:         ctx.Query("md5"),
            Size:        fileSize,
            TotalChunks: totalChunkCounts,
            Type:        typeCloudBrain,
        })
        if err != nil {
            ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
            return
        }

        ctx.JSON(200, map[string]string{
            "uuid":     uuid,
            "uploadID": uploadID,
        })
    } else {
        ctx.Error(404, "storage type is not enabled")
        return
    }
}

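// GetMultipartUploadUrl returns a presigned URL for uploading a single part of a multipart upload.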
func GetMultipartUploadUrl(ctx *context.Context) {
    uuid := ctx.Query("uuid")
    uploadID := ctx.Query("uploadID")
    partNumber := ctx.QueryInt("chunkNumber")
    size := ctx.QueryInt64("size")
    fileName := ctx.Query("file_name")

    typeCloudBrain := ctx.QueryInt("type")
    err := checkTypeCloudBrain(typeCloudBrain)
    if err != nil {
        ctx.ServerError("checkTypeCloudBrain failed", err)
        return
    }

    url := ""
    if typeCloudBrain == models.TypeCloudBrainOne {
        if size > minio_ext.MinPartSize {
            ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
            return
        }

        url, err = storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
        if err != nil {
            ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
            return
        }
    } else {
        url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, fileName)
        if err != nil {
            ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
            return
        }
    }

    ctx.JSON(200, map[string]string{
        "url": url,
    })
}

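// GetObsKey generates a new UUID and returns the OBS object key together with the endpoint, bucket and credentials needed to upload to it.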
func GetObsKey(ctx *context.Context) {
    uuid := gouuid.NewV4().String()
    key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")

    ctx.JSON(200, map[string]string{
        "uuid":              uuid,
        "key":               key,
        "access_key_id":     setting.AccessKeyID,
        "secret_access_key": setting.SecretAccessKey,
        "server":            setting.Endpoint,
        "bucket":            setting.Bucket,
    })
}

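// CompleteMultipart finishes a multipart upload, records the attachment and, for supported archives,
// triggers decompression or notifies the label system.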
func CompleteMultipart(ctx *context.Context) {
    uuid := ctx.Query("uuid")
    uploadID := ctx.Query("uploadID")
    typeCloudBrain := ctx.QueryInt("type")
    fileName := ctx.Query("file_name")

    err := checkTypeCloudBrain(typeCloudBrain)
    if err != nil {
        ctx.ServerError("checkTypeCloudBrain failed", err)
        return
    }

    fileChunk, err := models.GetFileChunkByUUID(uuid)
    if err != nil {
        if models.IsErrFileChunkNotExist(err) {
            ctx.Error(404)
        } else {
            ctx.ServerError("GetFileChunkByUUID", err)
        }
        return
    }

    if typeCloudBrain == models.TypeCloudBrainOne {
        _, err = storage.CompleteMultiPartUpload(uuid, uploadID)
        if err != nil {
            ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
            return
        }
    } else {
        err = storage.CompleteObsMultiPartUpload(uuid, uploadID, fileName)
        if err != nil {
            ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
            return
        }
    }

    fileChunk.IsUploaded = models.FileUploaded

    err = models.UpdateFileChunk(fileChunk)
    if err != nil {
        ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
        return
    }

    attachment, err := models.InsertAttachment(&models.Attachment{
        UUID:       uuid,
        UploaderID: ctx.User.ID,
        IsPrivate:  true,
        Name:       fileName,
        Size:       ctx.QueryInt64("size"),
        DatasetID:  ctx.QueryInt64("dataset_id"),
        Type:       typeCloudBrain,
    })
    if err != nil {
        ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
        return
    }

    if attachment.DatasetID != 0 {
        if isCanDecompress(attachment.Name) {
            if typeCloudBrain == models.TypeCloudBrainOne {
                err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
                if err != nil {
                    log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
                } else {
                    attachment.DecompressState = models.DecompressStateIng
                    err = models.UpdateAttachment(attachment)
                    if err != nil {
                        log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
                    }
                }
            }
            if typeCloudBrain == models.TypeCloudBrainTwo {
                attachjson, _ := json.Marshal(attachment)
                labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
            }
        } else {
            dataset, _ := models.GetDatasetByID(attachment.DatasetID)
            labelMap := make(map[string]string)
            labelMap["UUID"] = uuid
            labelMap["Type"] = fmt.Sprint(attachment.Type)
            labelMap["UploaderID"] = fmt.Sprint(attachment.UploaderID)
            labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
            labelMap["AttachName"] = attachment.Name
            attachjson, _ := json.Marshal(labelMap)
            labelmsg.SendAddAttachToLabelSys(string(attachjson))
        }
    }

    ctx.JSON(200, map[string]string{
        "result_code": "0",
    })
}

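// UpdateMultipart appends the etag of a completed part to the FileChunk record.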
func UpdateMultipart(ctx *context.Context) {
    uuid := ctx.Query("uuid")
    partNumber := ctx.QueryInt("chunkNumber")
    etag := ctx.Query("etag")

    fileChunk, err := models.GetFileChunkByUUID(uuid)
    if err != nil {
        if models.IsErrFileChunkNotExist(err) {
            ctx.Error(404)
        } else {
            ctx.ServerError("GetFileChunkByUUID", err)
        }
        return
    }

    fileChunk.CompletedParts = append(fileChunk.CompletedParts, strconv.Itoa(partNumber)+"-"+strings.Replace(etag, "\"", "", -1))

    err = models.UpdateFileChunk(fileChunk)
    if err != nil {
        ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
        return
    }

    ctx.JSON(200, map[string]string{
        "result_code": "0",
    })
}

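// HandleUnDecompressAttachment re-sends decompress tasks for attachments that have not been decompressed yet.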
func HandleUnDecompressAttachment() {
    attachs, err := models.GetUnDecompressAttachments()
    if err != nil {
        log.Error("GetUnDecompressAttachments failed: %s", err.Error())
        return
    }

    for _, attach := range attachs {
        err = worker.SendDecompressTask(contexExt.Background(), attach.UUID, attach.Name)
        if err != nil {
            log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
        } else {
            attach.DecompressState = models.DecompressStateIng
            err = models.UpdateAttachment(attach)
            if err != nil {
                log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
            }
        }
    }
}

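// QueryAllPublicDataset lists all public attachments as cloudbrain datasets.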
func QueryAllPublicDataset(ctx *context.Context) {
    attachs, err := models.GetAllPublicAttachments()
    if err != nil {
        ctx.JSON(200, map[string]string{
            "result_code": "-1",
            "error_msg":   err.Error(),
            "data":        "",
        })
        return
    }

    queryDatasets(ctx, attachs)
}

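// QueryPrivateDataset lists the private attachments of the given user as cloudbrain datasets.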
func QueryPrivateDataset(ctx *context.Context) {
    username := ctx.Params(":username")
    attachs, err := models.GetPrivateAttachments(username)
    if err != nil {
        ctx.JSON(200, map[string]string{
            "result_code": "-1",
            "error_msg":   err.Error(),
            "data":        "",
        })
        return
    }

    for _, attach := range attachs {
        attach.Name = username
    }

    queryDatasets(ctx, attachs)
}

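// queryDatasets converts the attachments that exist in storage into CloudBrainDataset entries and writes them as a JSON payload.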
func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) {
    var datasets []CloudBrainDataset
    if len(attachs) == 0 {
        log.Info("dataset is null")
        ctx.JSON(200, map[string]string{
            "result_code": "0",
            "error_msg":   "",
            "data":        "",
        })
        return
    }

    for _, attch := range attachs {
        has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(attch.UUID))
        if err != nil || !has {
            continue
        }

        datasets = append(datasets, CloudBrainDataset{
            strconv.FormatInt(attch.ID, 10),
            attch.Attachment.Name,
            setting.Attachment.Minio.RealPath +
                setting.Attachment.Minio.Bucket + "/" +
                setting.Attachment.Minio.BasePath +
                models.AttachmentRelativePath(attch.UUID) +
                attch.UUID,
            attch.Name,
            attch.CreatedUnix.Format("2006-01-02 03:04:05 PM"),
        })
    }

    data, err := json.Marshal(datasets)
    if err != nil {
        log.Error("json.Marshal failed: %s", err.Error())
        ctx.JSON(200, map[string]string{
            "result_code": "-1",
            "error_msg":   err.Error(),
            "data":        "",
        })
        return
    }

    ctx.JSON(200, map[string]string{
        "result_code": "0",
        "error_msg":   "",
        "data":        string(data),
    })
}

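// checkTypeCloudBrain validates that the requested type is one of the supported cloudbrain types.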
func checkTypeCloudBrain(typeCloudBrain int) error {
    if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo {
        log.Error("type error: %d", typeCloudBrain)
        return errors.New("type error")
    }
    return nil
}